issue: dict
pr: dict
pr_details: dict
{ "body": "After #21123 when Elasticsearch receive a HEAD request it returns the Content-Length of the that it would return for a GET request with an empty response body. Except in the document exists, index exists, and type exists requests which return 0. We should fix them to also return the Content-Length that would be in the response.\n", "comments": [ { "body": "I'm adding the v5.1.0 label too, I think we should target a fix there.\n", "created_at": "2016-10-26T05:16:19Z" }, { "body": "These are all addressed now. Closing.", "created_at": "2017-06-12T12:10:12Z" } ], "number": 21125, "title": "Some endpoints return Content-Length: 0 for HEAD requests" }
{ "body": "Get HEAD requests incorrectly return a content-length header of 0. This\r\ncommit addresses this by removing the special handling for get HEAD\r\nrequests, and just relying on the general mechanism that exists for\r\nhandling HEAD requests in the REST layer.\r\n\r\nRelates #21125\r\n", "number": 23186, "review_comments": [], "title": "Fix get HEAD requests" }
{ "commits": [ { "message": "Fix get HEAD requests\n\nGet HEAD requests incorrectly return a content-length header of 0. This\ncommit addresses this by removing the special handling for get HEAD\nrequests, and just relying on the general mechanism that exists for\nhandling HEAD requests in the REST layer." } ], "files": [ { "diff": "@@ -288,7 +288,6 @@\n import org.elasticsearch.rest.action.document.RestDeleteAction;\n import org.elasticsearch.rest.action.document.RestGetAction;\n import org.elasticsearch.rest.action.document.RestGetSourceAction;\n-import org.elasticsearch.rest.action.document.RestHeadAction;\n import org.elasticsearch.rest.action.document.RestIndexAction;\n import org.elasticsearch.rest.action.document.RestMultiGetAction;\n import org.elasticsearch.rest.action.document.RestMultiTermVectorsAction;\n@@ -563,7 +562,6 @@ public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster) {\n registerHandler.accept(new RestIndexAction(settings, restController));\n registerHandler.accept(new RestGetAction(settings, restController));\n registerHandler.accept(new RestGetSourceAction(settings, restController));\n- registerHandler.accept(new RestHeadAction.Document(settings, restController));\n registerHandler.accept(new RestMultiGetAction(settings, restController));\n registerHandler.accept(new RestDeleteAction(settings, restController));\n registerHandler.accept(new org.elasticsearch.rest.action.document.RestCountAction(settings, restController));", "filename": "core/src/main/java/org/elasticsearch/action/ActionModule.java", "status": "modified" }, { "diff": "@@ -36,13 +36,16 @@\n import java.io.IOException;\n \n import static org.elasticsearch.rest.RestRequest.Method.GET;\n+import static org.elasticsearch.rest.RestRequest.Method.HEAD;\n import static org.elasticsearch.rest.RestStatus.NOT_FOUND;\n import static org.elasticsearch.rest.RestStatus.OK;\n \n public class RestGetAction extends BaseRestHandler {\n- public RestGetAction(Settings settings, RestController controller) {\n+\n+ public RestGetAction(final Settings settings, final RestController controller) {\n super(settings);\n controller.registerHandler(GET, \"/{index}/{type}/{id}\", this);\n+ controller.registerHandler(HEAD, \"/{index}/{type}/{id}\", this);\n }\n \n @Override\n@@ -55,14 +58,14 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC\n getRequest.preference(request.param(\"preference\"));\n getRequest.realtime(request.paramAsBoolean(\"realtime\", getRequest.realtime()));\n if (request.param(\"fields\") != null) {\n- throw new IllegalArgumentException(\"The parameter [fields] is no longer supported, \" +\n+ throw new IllegalArgumentException(\"the parameter [fields] is no longer supported, \" +\n \"please use [stored_fields] to retrieve stored fields or [_source] to load the field from _source\");\n }\n- String sField = request.param(\"stored_fields\");\n- if (sField != null) {\n- String[] sFields = Strings.splitStringByCommaToArray(sField);\n- if (sFields != null) {\n- getRequest.storedFields(sFields);\n+ final String fieldsParam = request.param(\"stored_fields\");\n+ if (fieldsParam != null) {\n+ final String[] fields = Strings.splitStringByCommaToArray(fieldsParam);\n+ if (fields != null) {\n+ getRequest.storedFields(fields);\n }\n }\n \n@@ -73,9 +76,10 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC\n \n return channel -> client.get(getRequest, new RestToXContentListener<GetResponse>(channel) {\n @Override\n- protected RestStatus 
getStatus(GetResponse response) {\n+ protected RestStatus getStatus(final GetResponse response) {\n return response.isExists() ? OK : NOT_FOUND;\n }\n });\n }\n+\n }", "filename": "core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java", "status": "modified" }, { "diff": "@@ -33,7 +33,6 @@\n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n import static org.elasticsearch.rest.RestStatus.NOT_FOUND;\n import static org.elasticsearch.rest.RestStatus.OK;\n-import static org.hamcrest.Matchers.empty;\n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.greaterThan;\n \n@@ -62,20 +61,21 @@ private void createTestDoc(final String indexName, final String typeName) throws\n \n public void testDocumentExists() throws IOException {\n createTestDoc();\n- headTestCase(\"test/test/1\", emptyMap(), equalTo(0));\n- headTestCase(\"test/test/1\", singletonMap(\"pretty\", \"true\"), equalTo(0));\n+ headTestCase(\"/test/test/1\", emptyMap(), greaterThan(0));\n+ headTestCase(\"/test/test/1\", singletonMap(\"pretty\", \"true\"), greaterThan(0));\n+ headTestCase(\"/test/test/2\", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0));\n }\n \n public void testIndexExists() throws IOException {\n createTestDoc();\n- headTestCase(\"test\", emptyMap(), greaterThan(0));\n- headTestCase(\"test\", singletonMap(\"pretty\", \"true\"), greaterThan(0));\n+ headTestCase(\"/test\", emptyMap(), greaterThan(0));\n+ headTestCase(\"/test\", singletonMap(\"pretty\", \"true\"), greaterThan(0));\n }\n \n public void testTypeExists() throws IOException {\n createTestDoc();\n- headTestCase(\"test/test\", emptyMap(), equalTo(0));\n- headTestCase(\"test/test\", singletonMap(\"pretty\", \"true\"), equalTo(0));\n+ headTestCase(\"/test/test\", emptyMap(), equalTo(0));\n+ headTestCase(\"/test/test\", singletonMap(\"pretty\", \"true\"), equalTo(0));\n }\n \n public void testAliasExists() throws IOException {", "filename": "modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4HeadBodyIsEmptyIT.java", "status": "modified" } ] }
{ "body": "If you send a 100-continue header to Elasticsearch with a content-length header that exceeds the configured max content length, Elasticsearch responds with a 417 status (Expectation Failed). Since we intend to support the 100-continue header (we basically have to since curl automatically sends it as soon as the content-length is at least 1024 bytes), this is a violation of the HTTP specification; instead, we should be responding with 413 status (Request Entity Too Large). The 417 status is intended to only be used if the server does not support an expectation that the client specified in the expect header. That brings us to our second source of trouble. The specification defines expectation extensions and this is where 417 status replies are okay. If the client sends an expect header containing anything other than 100-continue, we should be replying with 417 status but instead we reply with 200 status.\r\n\r\nThe relevant section from the specification:\r\n\r\n>The server MUST respond with a 417 (Expectation Failed) status if any of the expectations cannot be met or, if there are other problems with the request, some other 4xx status.\r\n\r\nOf the two aforementioned problems, it's the first that is awful for end users. This is because if try to send a request to Elasticsearch that exceeds the content-length, Elasticsearch does not help them by telling them the request is too large. Instead, it provides the completely misleading 417 status. At this point, users are rightfully lost. With Elasticsearch configured to have a max content length of 100MiB:\r\n\r\n```\r\n17:43:18 [jason:~] $ dd if=/dev/zero of=zero count=128 bs=1024k\r\n128+0 records in\r\n128+0 records out\r\n134217728 bytes transferred in 0.099644 secs (1346970013 bytes/sec)\r\n17:43:19 [jason:~] $ curl -v localhost:9200/ --data-binary @zero\r\n* Trying ::1...\r\n* TCP_NODELAY set\r\n* Connected to localhost (::1) port 9200 (#0)\r\n> POST / HTTP/1.1\r\n> Host: localhost:9200\r\n> User-Agent: curl/7.51.0\r\n> Accept: */*\r\n> Content-Length: 134217728\r\n> Content-Type: application/x-www-form-urlencoded\r\n> Expect: 100-continue\r\n> \r\n< HTTP/1.1 417 Expectation Failed\r\n< content-length: 0\r\n* HTTP error before end of send, stop sending\r\n< \r\n* Curl_http_done: called premature == 0\r\n* Closing connection 0\r\n17:43:23 [jason:~] $ curl -v -H \"Expect: chocolate=yummy\" localhost:9200/\r\n* Trying ::1...\r\n* TCP_NODELAY set\r\n* Connected to localhost (::1) port 9200 (#0)\r\n> GET / HTTP/1.1\r\n> Host: localhost:9200\r\n> User-Agent: curl/7.51.0\r\n> Accept: */*\r\n> Expect: chocolate=yummy\r\n> \r\n< HTTP/1.1 200 OK\r\n< content-type: application/json; charset=UTF-8\r\n< content-length: 322\r\n< \r\n{\r\n \"name\" : \"49wXA7G\",\r\n \"cluster_name\" : \"elasticsearch\",\r\n \"cluster_uuid\" : \"J33uKzjOSi2PYl3dlE1R_A\",\r\n \"version\" : {\r\n \"number\" : \"6.0.0-alpha1\",\r\n \"build_hash\" : \"5343b87\",\r\n \"build_date\" : \"2017-02-13T22:41:34.078Z\",\r\n \"build_snapshot\" : true,\r\n \"lucene_version\" : \"6.5.0\"\r\n },\r\n \"tagline\" : \"You Know, for Search\"\r\n}\r\n* Curl_http_done: called premature == 0\r\n* Connection #0 to host localhost left intact\r\n```\r\n\r\nThe cause of this issue is due to a specification violation in the Netty 4 HTTP codec. 
I have opened netty/netty#6374 and with this patch, Elasticsearch will do the right thing:\r\n\r\n```\r\n17:43:25 [jason:~] $ dd if=/dev/zero of=zero count=128 bs=1024k\r\n128+0 records in\r\n128+0 records out\r\n134217728 bytes transferred in 0.091381 secs (1468769446 bytes/sec)\r\n17:43:56 [jason:~] $ curl -v localhost:9200/ --data-binary @zero\r\n* Trying ::1...\r\n* TCP_NODELAY set\r\n* Connected to localhost (::1) port 9200 (#0)\r\n> POST / HTTP/1.1\r\n> Host: localhost:9200\r\n> User-Agent: curl/7.51.0\r\n> Accept: */*\r\n> Content-Length: 134217728\r\n> Content-Type: application/x-www-form-urlencoded\r\n> Expect: 100-continue\r\n> \r\n< HTTP/1.1 413 Request Entity Too Large\r\n< content-length: 0\r\n* HTTP error before end of send, stop sending\r\n< \r\n* Curl_http_done: called premature == 0\r\n* Closing connection 0\r\n17:43:58 [jason:~] $ curl -v -H \"Expect: chocolate=yummy\" localhost:9200/\r\n* Trying ::1...\r\n* TCP_NODELAY set\r\n* Connected to localhost (::1) port 9200 (#0)\r\n> GET / HTTP/1.1\r\n> Host: localhost:9200\r\n> User-Agent: curl/7.51.0\r\n> Accept: */*\r\n> Expect: chocolate=yummy\r\n> \r\n< HTTP/1.1 417 Expectation Failed\r\n< content-length: 0\r\n< \r\n* Curl_http_done: called premature == 0\r\n* Connection #0 to host localhost left intact\r\n```", "comments": [ { "body": "This is blocked on a release of Netty that incorporates netty/netty#6374.", "created_at": "2017-02-14T22:49:18Z" }, { "body": "The PR netty/netty#6374 has been integrated, we only awaiting a release now.", "created_at": "2017-02-15T14:06:03Z" }, { "body": "@jasontedor I understand this is an awful old bug and closed, however I am having this issue trying to query ElasticSearch from the PowerBI desktop Web DataSource even on 6.0.1 version. \r\n\r\nIs this suppose to still happen? I have been reading that PowerBI does actually add the Expect: 100-Continue header so this is expected. \r\n\r\nWhat is the workaround/solution here then? Changing the content-length setting in ElasticSearch?\r\n\r\nhttps://stackoverflow.com/questions/566437/http-post-returns-error-417-expectation-failed\r\n\r\nEdit: This issue is solved and the header had nothing to do. I was actually testing this behind a proxy and that was actually catching the requests before my nginx/elasticsearch/powerbi. I am getting a correct response now. ", "created_at": "2017-12-10T22:20:02Z" } ], "number": 23172, "title": "Elasticsearch incorrectly handles expect header" }
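The actual fix landed in the Netty HTTP codec via netty/netty#6374, so the logic below is not Elasticsearch code; it only distills the decision the HTTP specification calls for, using Netty types that also appear in the test diffs that follow.

```
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpHeaderValues;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpUtil;

// Sketch of the spec-compliant decision described in the issue; the real fix
// lives in Netty's HTTP codec (netty/netty#6374), not in Elasticsearch itself.
final class ExpectHeaderSketch {

    /**
     * Decides how to answer an Expect header: 100 (Continue) when the body may
     * be sent, 413 when the announced Content-Length exceeds the limit, and 417
     * for any expectation other than 100-continue. Returns null when the
     * request carries no expectation and processing can simply proceed.
     */
    static HttpResponseStatus decide(final HttpRequest request, final long maxContentLength) {
        final String expect = request.headers().get(HttpHeaderNames.EXPECT);
        if (expect == null) {
            return null; // no expectation to answer
        }
        if (HttpHeaderValues.CONTINUE.contentEqualsIgnoreCase(expect)) {
            return HttpUtil.getContentLength(request, 0L) > maxContentLength
                    ? HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE  // 413
                    : HttpResponseStatus.CONTINUE;                 // 100
        }
        return HttpResponseStatus.EXPECTATION_FAILED;              // 417
    }

}
```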
{ "body": "This commit adds unit tests for two cases where Elasticsearch violates expect header handling. These tests are marked as awaits fix.\r\n\r\nRelates #23172", "number": 23173, "review_comments": [], "title": "Add failing tests for expect header violations" }
{ "commits": [ { "message": "Add failing tests for expect header violations\n\nThis commit adds unit tests for two cases where Elasticsearch violates\nexpect header handling. This tests are marked as awaits fix." }, { "message": "More correct test name" } ], "files": [ { "diff": "@@ -25,6 +25,8 @@\n import io.netty.handler.codec.http.DefaultFullHttpRequest;\n import io.netty.handler.codec.http.FullHttpRequest;\n import io.netty.handler.codec.http.FullHttpResponse;\n+import io.netty.handler.codec.http.HttpHeaderNames;\n+import io.netty.handler.codec.http.HttpHeaderValues;\n import io.netty.handler.codec.http.HttpMethod;\n import io.netty.handler.codec.http.HttpResponseStatus;\n import io.netty.handler.codec.http.HttpUtil;\n@@ -137,8 +139,42 @@ public void testCorsConfigWithDefaults() {\n \n /**\n * Test that {@link Netty4HttpServerTransport} supports the \"Expect: 100-continue\" HTTP header\n+ * @throws InterruptedException if the client communication with the server is interrupted\n */\n- public void testExpectContinueHeader() throws Exception {\n+ public void testExpectContinueHeader() throws InterruptedException {\n+ final Settings settings = Settings.EMPTY;\n+ final int contentLength = randomIntBetween(1, HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings).bytesAsInt());\n+ runExpectHeaderTest(settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.CONTINUE);\n+ }\n+\n+ /**\n+ * Test that {@link Netty4HttpServerTransport} responds to a 100-continue expectation with too large a content-length with a 413 status.\n+ * @throws InterruptedException if the client communication with the server is interrupted\n+ */\n+ @AwaitsFix(bugUrl = \"https://github.com/elastic/elasticsearch/issues/23172\")\n+ public void testExpectContinueHeaderContentLengthTooLong() throws InterruptedException {\n+ final String key = HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey();\n+ final int maxContentLength = randomIntBetween(1, 104857600);\n+ final Settings settings = Settings.builder().put(key, maxContentLength + \"b\").build();\n+ final int contentLength = randomIntBetween(maxContentLength + 1, Integer.MAX_VALUE);\n+ runExpectHeaderTest(\n+ settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE);\n+ }\n+\n+ /**\n+ * Test that {@link Netty4HttpServerTransport} responds to an unsupported expectation with a 417 status.\n+ * @throws InterruptedException if the client communication with the server is interrupted\n+ */\n+ @AwaitsFix(bugUrl = \"https://github.com/elastic/elasticsearch/issues/23172\")\n+ public void testExpectUnsupportedExpectation() throws InterruptedException {\n+ runExpectHeaderTest(Settings.EMPTY, \"chocolate=yummy\", 0, HttpResponseStatus.EXPECTATION_FAILED);\n+ }\n+\n+ private void runExpectHeaderTest(\n+ final Settings settings,\n+ final String expectation,\n+ final int contentLength,\n+ final HttpResponseStatus expectedStatus) throws InterruptedException {\n final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {\n @Override\n public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) {\n@@ -150,23 +186,24 @@ public void dispatchBadRequest(RestRequest request, RestChannel channel, ThreadC\n throw new AssertionError();\n }\n };\n- try (Netty4HttpServerTransport transport = new Netty4HttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool,\n+ try (Netty4HttpServerTransport transport = new 
Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool,\n xContentRegistry(), dispatcher)) {\n transport.start();\n- TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());\n-\n+ final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());\n try (Netty4HttpClient client = new Netty4HttpClient()) {\n- FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, \"/\");\n- HttpUtil.set100ContinueExpected(request, true);\n- HttpUtil.setContentLength(request, 10);\n+ final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, \"/\");\n+ request.headers().set(HttpHeaderNames.EXPECT, expectation);\n+ HttpUtil.setContentLength(request, contentLength);\n \n- FullHttpResponse response = client.post(remoteAddress.address(), request);\n- assertThat(response.status(), is(HttpResponseStatus.CONTINUE));\n+ final FullHttpResponse response = client.post(remoteAddress.address(), request);\n+ assertThat(response.status(), equalTo(expectedStatus));\n+ if (expectedStatus.equals(HttpResponseStatus.CONTINUE)) {\n+ final FullHttpRequest continuationRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, \"/\", Unpooled.EMPTY_BUFFER);\n+ final FullHttpResponse continuationResponse = client.post(remoteAddress.address(), continuationRequest);\n \n- request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, \"/\", Unpooled.EMPTY_BUFFER);\n- response = client.post(remoteAddress.address(), request);\n- assertThat(response.status(), is(HttpResponseStatus.OK));\n- assertThat(new String(ByteBufUtil.getBytes(response.content()), StandardCharsets.UTF_8), is(\"done\"));\n+ assertThat(continuationResponse.status(), is(HttpResponseStatus.OK));\n+ assertThat(new String(ByteBufUtil.getBytes(continuationResponse.content()), StandardCharsets.UTF_8), is(\"done\"));\n+ }\n }\n }\n }", "filename": "modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java", "status": "modified" } ] }
{ "body": "I'm on 5.1.2 and I just noticed locally that my tests started to fail due to the length of the index names being passed. **Obviously I should be using aliases for this......**\r\n\r\n```\r\nPOST /test-events-2016.08.12,test-events-2016.08.13,test-events-2016.08.14,test-events-2016.08.15,test-events-2016.08.16,test-events-2016.08.17,test-events-2016.08.18,test-events-2016.08.19,test-events-2016.08.20,test-events-2016.08.21,test-events-2016.08.22,test-events-2016.08.23,test-events-2016.08.24,test-events-2016.08.25,test-events-2016.08.26,test-events-2016.08.27,test-events-2016.08.28,test-events-2016.08.29,test-events-2016.08.30,test-events-2016.08.31,test-events-2016.09.01,test-events-2016.09.02,test-events-2016.09.03,test-events-2016.09.04,test-events-2016.09.05,test-events-2016.09.06,test-events-2016.09.07,test-events-2016.09.08,test-events-2016.09.09,test-events-2016.09.10,test-events-2016.09.11,test-events-2016.09.12,test-events-2016.09.13,test-events-2016.09.14,test-events-2016.09.15,test-events-2016.09.16,test-events-2016.09.17,test-events-2016.09.18,test-events-2016.09.19,test-events-2016.09.20,test-events-2016.09.21,test-events-2016.09.22,test-events-2016.09.23,test-events-2016.09.24,test-events-2016.09.25,test-events-2016.09.26,test-events-2016.09.27,test-events-2016.09.28,test-events-2016.09.29,test-events-2016.09.30,test-events-2016.10.01,test-events-2016.10.02,test-events-2016.10.03,test-events-2016.10.04,test-events-2016.10.05,test-events-2016.10.06,test-events-2016.10.07,test-events-2016.10.08,test-events-2016.10.09,test-events-2016.10.10,test-events-2016.10.11,test-events-2016.10.12,test-events-2016.10.13,test-events-2016.10.14,test-events-2016.10.15,test-events-2016.10.16,test-events-2016.10.17,test-events-2016.10.18,test-events-2016.10.19,test-events-2016.10.20,test-events-2016.10.21,test-events-2016.10.22,test-events-2016.10.23,test-events-2016.10.24,test-events-2016.10.25,test-events-2016.10.26,test-events-2016.10.27,test-events-2016.10.28,test-events-2016.10.29,test-events-2016.10.30,test-events-2016.10.31,test-events-2016.11.01,test-events-2016.11.02,test-events-2016.11.03,test-events-2016.11.04,test-events-2016.11.05,test-events-2016.11.06,test-events-2016.11.07,test-events-2016.11.08,test-events-2016.11.09,test-events-2016.11.10,test-events-2016.11.11,test-events-2016.11.12,test-events-2016.11.13,test-events-2016.11.14,test-events-2016.11.15,test-events-2016.11.16,test-events-2016.11.17,test-events-2016.11.18,test-events-2016.11.19,test-events-2016.11.20,test-events-2016.11.21,test-events-2016.11.22,test-events-2016.11.23,test-events-2016.11.24,test-events-2016.11.25,test-events-2016.11.26,test-events-2016.11.27,test-events-2016.11.28,test-events-2016.11.29,test-events-2016.11.30,test-events-2016.12.01,test-events-2016.12.02,test-events-2016.12.03,test-events-2016.12.04,test-events-2016.12.05,test-events-2016.12.06,test-events-2016.12.07,test-events-2016.12.08,test-events-2016.12.09,test-events-2016.12.10,test-events-2016.12.11,test-events-2016.12.12,test-events-2016.12.13,test-events-2016.12.14,test-events-2016.12.15,test-events-2016.12.16,test-events-2016.12.17,test-events-2016.12.18,test-events-2016.12.19,test-events-2016.12.20,test-events-2016.12.21,test-events-2016.12.22,test-events-2016.12.23,test-events-2016.12.24,test-events-2016.12.25,test-events-2016.12.26,test-events-2016.12.27,test-events-2016.12.28,test-events-2016.12.29,test-events-2016.12.30,test-events-2016.12.31,test-events-2017.01.01,test-events-2017.01.02,test-events-2017.01.03,test-events-2
017.01.04,test-events-2017.01.05,test-events-2017.01.06,test-events-2017.01.07,test-events-2017.01.08,test-events-2017.01.09,test-events-2017.01.10,test-events-2017.01.11,test-events-2017.01.12,test-events-2017.01.13,test-events-2017.01.14,test-events-2017.01.15,test-events-2017.01.16,test-events-2017.01.17,test-events-2017.01.18,test-events-2017.01.19,test-events-2017.01.20,test-events-2017.01.21,test-events-2017.01.22,test-events-2017.01.23,test-events-2017.01.24,test-events-2017.01.25,test-events-2017.01.26,test-events-2017.01.27,test-events-2017.01.28,test-events-2017.01.29,test-events-2017.01.30,test-events-2017.01.31,test-events-2017.02.01,test-events-2017.02.02,test-events-2017.02.03,test-events-2017.02.04,test-events-2017.02.05,test-events-2017.02.06,test-events-2017.02.07/events/_search?ignore_unavailable=true\r\n{\r\n \"size\": 11,\r\n \"sort\": [\r\n {\r\n \"date\": {\r\n \"order\": \"desc\"\r\n }\r\n }\r\n ],\r\n \"_source\": {\r\n \"excludes\": [\r\n \"idx\",\r\n \"ip\",\r\n \"error\"\r\n ]\r\n },\r\n \"query\": {\r\n \"bool\": {\r\n \"filter\": [\r\n {\r\n \"bool\": {\r\n \"must\": [\r\n {\r\n \"range\": {\r\n \"date\": {\r\n \"gte\": \"2016-08-12T00:00:00Z\",\r\n \"lte\": \"2017-02-07T23:53:14.9558531Z\"\r\n }\r\n }\r\n },\r\n {\r\n \"term\": {\r\n \"is_deleted\": {\r\n \"value\": false\r\n }\r\n }\r\n },\r\n {\r\n \"term\": {\r\n \"stack_id\": {\r\n \"value\": \"1ecd0826e447a44e78877ab1\"\r\n }\r\n }\r\n }\r\n ],\r\n \"must_not\": [\r\n {\r\n \"ids\": {\r\n \"values\": [\r\n \"589a5deaf8a1031b5ce541cc\"\r\n ]\r\n }\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n }\r\n }\r\n}\r\n````\r\n\r\n```\r\n{\r\n \"error\": {\r\n \"root_cause\": [\r\n {\r\n \"type\": \"index_not_found_exception\",\r\n \"reason\": \"no such index\",\r\n \"resource.type\": \"index_or_alias\",\r\n \"resource.id\": \"bad-request\",\r\n \"index_uuid\": \"_na_\",\r\n \"index\": \"bad-request\"\r\n }\r\n ],\r\n \"type\": \"index_not_found_exception\",\r\n \"reason\": \"no such index\",\r\n \"resource.type\": \"index_or_alias\",\r\n \"resource.id\": \"bad-request\",\r\n \"index_uuid\": \"_na_\",\r\n \"index\": \"bad-request\"\r\n },\r\n \"status\": 404\r\n}\r\n```", "comments": [ { "body": "You're exceeding the configured header length, so yes, this request would not work. But it is very weird that the `bad_request` exception is being used as an index name!", "created_at": "2017-02-09T20:32:45Z" }, { "body": "yeah, and it was a 404 instead of 400 :), it might be good to return a good error message here like `you are a nub, use an alias`", "created_at": "2017-02-09T21:41:27Z" }, { "body": "I had a quick look at this. We get the `/bad-request` uri from netty, rather than the provided long one, that clearly causes problems. I need to look into why this happens and what we can do to fix this.", "created_at": "2017-02-09T23:22:34Z" }, { "body": "There's a broader problem here as:\r\n\r\n```\r\nPOST \"/i/t/1?routing=a a\"\r\n{\r\n \"f\": \"v\"\r\n}\r\n```\r\n\r\nresults in the same problem (`bad-request`).\r\n\r\n", "created_at": "2017-02-12T15:37:21Z" }, { "body": "It is indeed Netty that is doing this and what they are doing that leads to this is uncool. Here's a messed up situation that I just realized that you can get into:\r\n\r\n```\r\nPUT /bad-request/t/1\r\n{\r\n \"f\": \"v\"\r\n}\r\n```\r\n\r\nThen execute any invalid request (like that in the original post here, or my [example above](https://github.com/elastic/elasticsearch/issues/23034#issuecomment-279226379)). 
Elasticsearch responds with a 200 OK and:\r\n\r\n```\r\n{\r\n \"bad-request\": {\r\n \"aliases\": {},\r\n \"mappings\": {\r\n \"t\": {\r\n \"properties\": {\r\n \"f\": {\r\n \"fields\": {\r\n \"keyword\": {\r\n \"ignore_above\": 256,\r\n \"type\": \"keyword\"\r\n }\r\n },\r\n \"type\": \"text\"\r\n }\r\n }\r\n }\r\n },\r\n \"settings\": {\r\n \"index\": {\r\n \"creation_date\": \"1487005074725\",\r\n \"number_of_replicas\": \"1\",\r\n \"number_of_shards\": \"5\",\r\n \"provided_name\": \"bad-request\",\r\n \"uuid\": \"CoPiLfPmRv-MdXMMcbGkWQ\",\r\n \"version\": {\r\n \"created\": \"6000001\"\r\n }\r\n }\r\n }\r\n }\r\n}\r\n\r\n```", "created_at": "2017-02-13T17:01:57Z" } ], "number": 23034, "title": "index_not_found_exception when passing in a long list of index names" }
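The report can be reproduced with plain JDK classes; the sketch below assumes a local node on localhost:9200 and simply builds a request line far longer than any configured http.max_initial_line_length. Before the fix in the next PR this surfaced as a confusing 404 (or a 200 against an index literally named "bad-request"); after it, the node answers 400 Bad Request.

```
import java.net.HttpURLConnection;
import java.net.URL;

// Reproduction sketch, assuming a local node on localhost:9200: a request
// line longer than http.max_initial_line_length is rejected by the HTTP
// codec before routing, so the status code shows how the node handles the
// decoding failure.
public class LongRequestLineCheck {
    public static void main(String[] args) throws Exception {
        StringBuilder path = new StringBuilder("/");
        while (path.length() < 100_000) {           // well past any sane line-length limit
            path.append("test-events-2016.08.12,"); // mimics the long index list from the report
        }
        URL url = new URL("http://localhost:9200" + path + "_search");
        HttpURLConnection connection = (HttpURLConnection) url.openConnection();
        try {
            System.out.println("status: " + connection.getResponseCode());
        } finally {
            connection.disconnect();
        }
    }
}
```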
{ "body": "When Netty decodes a bad HTTP request, it marks the decoder result on the HTTP request as a failure, and reroutes the request to GET /bad-request. This either leads to puzzling responses when a bad request is sent to Elasticsearch (if an index named \"bad-request\" does not exist then it produces an index not found exception and otherwise responds with the index settings for the index named \"bad-request\"). This commit addresses this by inspecting the decoder result on the HTTP request and dispatching the request to a bad request handler preserving the initial cause of the bad request and providing an error message to the client.\r\n\r\nCloses #23034", "number": 23153, "review_comments": [ { "body": "nit: can you make \"bad request\" more concrete/descriptive or give a example", "created_at": "2017-02-13T20:25:38Z" }, { "body": "I pushed c617b7ebbb6cc75f98c8df02e18645f6f673cd35.", "created_at": "2017-02-13T21:29:23Z" } ], "title": "Handle bad HTTP requests" }
{ "commits": [ { "message": "Handle bad HTTP requests\n\nWhen Netty decodes a bad HTTP request, it marks the decoder result on\nthe HTTP request as a failure, and reroutes the request to GET\n/bad-request. This either leads to puzzling responses when a bad request\nis sent to Elasticsearch (if an index named \"bad-request\" does not exist\nthen it produces an index not found exception and otherwise responds\nwith the index settings for the index named \"bad-request\"). This commit\naddresses this by inspecting the decoder result on the HTTP request and\ndispatching the request to a bad request handler preserving the initial\ncause of the bad request and providing an error message to the client." }, { "message": "Log failure to send message\n\nWhen dispatching a bad request, if sending the response to the client\nfails, we should log the bad request cause and the underlying cause of\nthe failure to send." }, { "message": "Handle unknown cause on bad request dispatch\n\nWhen a bad request is dispatched, in case the cause is unknown we should\nindicate such." }, { "message": "Remove unnecessary newline" }, { "message": "Fix forbidden APIs violation" }, { "message": "Expand Javadocs on Dispatcher#dispatchBadRequest" }, { "message": "Add tests for RestController#dispatchBadRequest" }, { "message": "Add license header" }, { "message": "Add REST test for bad request handling" }, { "message": "Merge branch 'master' into handle-bad-requests\n\n* master:\n Update to forbiddenapis 2.3 (improves Gradle configuration time) (#23154)\n Make the version of the remote node accessible on a transport channel (#23019)\n Fix total disk bytes returning negative value (#23093)\n Fix communication with 5.3.0 nodes\n Update redirects.asciidoc (#23148)" }, { "message": "Add missing license header" } ], "files": [ { "diff": "@@ -35,15 +35,32 @@ public interface HttpServerTransport extends LifecycleComponent {\n \n HttpStats stats();\n \n- @FunctionalInterface\n+ /**\n+ * Dispatches HTTP requests.\n+ */\n interface Dispatcher {\n+\n /**\n * Dispatches the {@link RestRequest} to the relevant request handler or responds to the given rest channel directly if\n * the request can't be handled by any request handler.\n- * @param request the request to dispatch\n- * @param channel the response channel of this request\n- * @param threadContext the nodes thread context\n+ *\n+ * @param request the request to dispatch\n+ * @param channel the response channel of this request\n+ * @param threadContext the thread context\n+ */\n+ void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext);\n+\n+ /**\n+ * Dispatches a bad request. For example, if a request is malformed it will be dispatched via this method with the cause of the bad\n+ * request.\n+ *\n+ * @param request the request to dispatch\n+ * @param channel the response channel of this request\n+ * @param threadContext the thread context\n+ * @param cause the cause of the bad request\n */\n- void dispatch(RestRequest request, RestChannel channel, ThreadContext threadContext);\n+ void dispatchBadRequest(RestRequest request, RestChannel channel, ThreadContext threadContext, Throwable cause);\n+\n }\n+\n }", "filename": "core/src/main/java/org/elasticsearch/http/HttpServerTransport.java", "status": "modified" }, { "diff": "@@ -401,8 +401,7 @@ protected Node(final Environment environment, Collection<Class<? 
extends Plugin>\n .collect(Collectors.toList());\n final RestController restController = actionModule.getRestController();\n final NetworkModule networkModule = new NetworkModule(settings, false, pluginsService.filterPlugins(NetworkPlugin.class),\n- threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService,\n- restController::dispatchRequest);\n+ threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, restController);\n final MetaDataUpgrader metaDataUpgrader = new MetaDataUpgrader(customMetaDataUpgraders);\n final Transport transport = networkModule.getTransportSupplier().get();\n final TransportService transportService = newTransportService(settings, transport, threadPool,", "filename": "core/src/main/java/org/elasticsearch/node/Node.java", "status": "modified" }, { "diff": "@@ -19,18 +19,8 @@\n \n package org.elasticsearch.rest;\n \n-import java.io.ByteArrayOutputStream;\n-import java.io.IOException;\n-import java.io.InputStream;\n-import java.util.List;\n-import java.util.Locale;\n-import java.util.Objects;\n-import java.util.Set;\n-import java.util.concurrent.atomic.AtomicBoolean;\n-import java.util.function.Supplier;\n-import java.util.function.UnaryOperator;\n-\n import org.apache.logging.log4j.message.ParameterizedMessage;\n+import org.elasticsearch.ElasticsearchException;\n import org.elasticsearch.client.node.NodeClient;\n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.Strings;\n@@ -45,17 +35,30 @@\n import org.elasticsearch.common.util.concurrent.ThreadContext;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n+import org.elasticsearch.common.xcontent.XContentType;\n+import org.elasticsearch.http.HttpServerTransport;\n import org.elasticsearch.http.HttpTransportSettings;\n import org.elasticsearch.indices.breaker.CircuitBreakerService;\n-import org.elasticsearch.common.xcontent.XContentType;\n+\n+import java.io.ByteArrayOutputStream;\n+import java.io.IOException;\n+import java.io.InputStream;\n+import java.util.List;\n+import java.util.Locale;\n+import java.util.Objects;\n+import java.util.Set;\n+import java.util.concurrent.atomic.AtomicBoolean;\n+import java.util.function.Supplier;\n+import java.util.function.UnaryOperator;\n \n import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;\n import static org.elasticsearch.rest.RestStatus.FORBIDDEN;\n import static org.elasticsearch.rest.RestStatus.INTERNAL_SERVER_ERROR;\n import static org.elasticsearch.rest.RestStatus.NOT_ACCEPTABLE;\n import static org.elasticsearch.rest.RestStatus.OK;\n \n-public class RestController extends AbstractComponent {\n+public class RestController extends AbstractComponent implements HttpServerTransport.Dispatcher {\n+\n private final PathTrie<RestHandler> getHandlers = new PathTrie<>(RestUtils.REST_DECODER);\n private final PathTrie<RestHandler> postHandlers = new PathTrie<>(RestUtils.REST_DECODER);\n private final PathTrie<RestHandler> putHandlers = new PathTrie<>(RestUtils.REST_DECODER);\n@@ -167,6 +170,7 @@ public boolean canTripCircuitBreaker(RestRequest request) {\n return (handler != null) ? 
handler.canTripCircuitBreaker() : true;\n }\n \n+ @Override\n public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) {\n if (request.rawPath().equals(\"/favicon.ico\")) {\n handleFavicon(request, channel);\n@@ -205,6 +209,31 @@ public void dispatchRequest(RestRequest request, RestChannel channel, ThreadCont\n }\n }\n \n+ @Override\n+ public void dispatchBadRequest(\n+ final RestRequest request,\n+ final RestChannel channel,\n+ final ThreadContext threadContext,\n+ final Throwable cause) {\n+ try {\n+ final Exception e;\n+ if (cause == null) {\n+ e = new ElasticsearchException(\"unknown cause\");\n+ } else if (cause instanceof Exception) {\n+ e = (Exception) cause;\n+ } else {\n+ e = new ElasticsearchException(cause);\n+ }\n+ channel.sendResponse(new BytesRestResponse(channel, BAD_REQUEST, e));\n+ } catch (final IOException e) {\n+ if (cause != null) {\n+ e.addSuppressed(cause);\n+ }\n+ logger.warn(\"failed to send bad request response\", e);\n+ channel.sendResponse(new BytesRestResponse(INTERNAL_SERVER_ERROR, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY));\n+ }\n+ }\n+\n void dispatchRequest(final RestRequest request, final RestChannel channel, final NodeClient client, ThreadContext threadContext,\n final RestHandler handler) throws Exception {\n if (checkRequestParameters(request, channel) == false) {\n@@ -419,4 +448,5 @@ private static CircuitBreaker inFlightRequestsBreaker(CircuitBreakerService circ\n // We always obtain a fresh breaker to reflect changes to the breaker configuration.\n return circuitBreakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS);\n }\n+\n }", "filename": "core/src/main/java/org/elasticsearch/rest/RestController.java", "status": "modified" }, { "diff": "@@ -491,4 +491,5 @@ private static XContentType parseContentType(List<String> header) {\n }\n throw new IllegalArgumentException(\"empty Content-Type header\");\n }\n+\n }", "filename": "core/src/main/java/org/elasticsearch/rest/RestRequest.java", "status": "modified" }, { "diff": "@@ -32,6 +32,7 @@\n import org.elasticsearch.http.HttpInfo;\n import org.elasticsearch.http.HttpServerTransport;\n import org.elasticsearch.http.HttpStats;\n+import org.elasticsearch.http.NullDispatcher;\n import org.elasticsearch.indices.breaker.CircuitBreakerService;\n import org.elasticsearch.plugins.NetworkPlugin;\n import org.elasticsearch.rest.BaseRestHandler;\n@@ -298,6 +299,6 @@ public List<TransportInterceptor> getTransportInterceptors(NamedWriteableRegistr\n \n private NetworkModule newNetworkModule(Settings settings, boolean transportClient, NetworkPlugin... 
plugins) {\n return new NetworkModule(settings, transportClient, Arrays.asList(plugins), threadPool, null, null, null, xContentRegistry(), null,\n- (a, b, c) -> {});\n+ new NullDispatcher());\n }\n }", "filename": "core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java", "status": "modified" }, { "diff": "@@ -19,17 +19,6 @@\n \n package org.elasticsearch.rest;\n \n-import java.io.IOException;\n-import java.util.Arrays;\n-import java.util.Collections;\n-import java.util.HashMap;\n-import java.util.HashSet;\n-import java.util.List;\n-import java.util.Map;\n-import java.util.Set;\n-import java.util.concurrent.atomic.AtomicBoolean;\n-import java.util.function.UnaryOperator;\n-\n import org.elasticsearch.client.node.NodeClient;\n import org.elasticsearch.common.breaker.CircuitBreaker;\n import org.elasticsearch.common.bytes.BytesArray;\n@@ -54,6 +43,19 @@\n import org.elasticsearch.test.rest.FakeRestRequest;\n import org.junit.Before;\n \n+import java.io.IOException;\n+import java.util.Arrays;\n+import java.util.Collections;\n+import java.util.HashMap;\n+import java.util.HashSet;\n+import java.util.List;\n+import java.util.Map;\n+import java.util.Set;\n+import java.util.concurrent.atomic.AtomicBoolean;\n+import java.util.concurrent.atomic.AtomicReference;\n+import java.util.function.UnaryOperator;\n+\n+import static org.hamcrest.Matchers.containsString;\n import static org.mockito.Matchers.any;\n import static org.mockito.Matchers.eq;\n import static org.mockito.Mockito.doCallRealMethod;\n@@ -261,18 +263,18 @@ public void testDispatchRequiresContentTypeForRequestsWithContent() {\n (r, c, client) -> c.sendResponse(\n new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY)));\n \n- assertFalse(channel.sendResponseCalled.get());\n+ assertFalse(channel.getSendResponseCalled());\n restController.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY));\n- assertTrue(channel.sendResponseCalled.get());\n+ assertTrue(channel.getSendResponseCalled());\n }\n \n public void testDispatchDoesNotRequireContentTypeForRequestsWithoutContent() {\n FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).build();\n AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.OK);\n \n- assertFalse(channel.sendResponseCalled.get());\n+ assertFalse(channel.getSendResponseCalled());\n restController.dispatchRequest(fakeRestRequest, channel, new ThreadContext(Settings.EMPTY));\n- assertTrue(channel.sendResponseCalled.get());\n+ assertTrue(channel.getSendResponseCalled());\n }\n \n public void testDispatchWorksWithPlainText() {\n@@ -293,9 +295,9 @@ public boolean supportsPlainText() {\n }\n });\n \n- assertFalse(channel.sendResponseCalled.get());\n+ assertFalse(channel.getSendResponseCalled());\n restController.dispatchRequest(fakeRestRequest, channel, new ThreadContext(Settings.EMPTY));\n- assertTrue(channel.sendResponseCalled.get());\n+ assertTrue(channel.getSendResponseCalled());\n assertWarnings(\"Plain text request bodies are deprecated. 
Use request parameters or body in a supported format.\");\n }\n \n@@ -305,9 +307,9 @@ public void testDispatchWorksWithAutoDetection() {\n .withHeaders(Collections.singletonMap(\"Content-Type\", Collections.singletonList(\"application/x-www-form-urlencoded\"))).build();\n AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.OK);\n \n- assertFalse(channel.sendResponseCalled.get());\n+ assertFalse(channel.getSendResponseCalled());\n restController.dispatchRequest(fakeRestRequest, channel, new ThreadContext(Settings.EMPTY));\n- assertTrue(channel.sendResponseCalled.get());\n+ assertTrue(channel.getSendResponseCalled());\n assertWarnings(\"Content type detection for rest requests is deprecated. Specify the content type using the [Content-Type] header.\");\n }\n \n@@ -330,9 +332,9 @@ public boolean supportsContentStream() {\n }\n });\n \n- assertFalse(channel.sendResponseCalled.get());\n+ assertFalse(channel.getSendResponseCalled());\n restController.dispatchRequest(fakeRestRequest, channel, new ThreadContext(Settings.EMPTY));\n- assertTrue(channel.sendResponseCalled.get());\n+ assertTrue(channel.getSendResponseCalled());\n }\n \n public void testDispatchWithContentStream() {\n@@ -354,9 +356,9 @@ public boolean supportsContentStream() {\n }\n });\n \n- assertFalse(channel.sendResponseCalled.get());\n+ assertFalse(channel.getSendResponseCalled());\n restController.dispatchRequest(fakeRestRequest, channel, new ThreadContext(Settings.EMPTY));\n- assertTrue(channel.sendResponseCalled.get());\n+ assertTrue(channel.getSendResponseCalled());\n }\n \n public void testDispatchWithContentStreamAutoDetect() {\n@@ -375,9 +377,9 @@ public boolean supportsContentStream() {\n }\n });\n \n- assertFalse(channel.sendResponseCalled.get());\n+ assertFalse(channel.getSendResponseCalled());\n restController.dispatchRequest(fakeRestRequest, channel, new ThreadContext(Settings.EMPTY));\n- assertTrue(channel.sendResponseCalled.get());\n+ assertTrue(channel.getSendResponseCalled());\n assertWarnings(\"Content type detection for rest requests is deprecated. Specify the content type using the [Content-Type] header.\");\n }\n \n@@ -398,19 +400,19 @@ public boolean supportsContentStream() {\n }\n });\n \n- assertFalse(channel.sendResponseCalled.get());\n+ assertFalse(channel.getSendResponseCalled());\n restController.dispatchRequest(fakeRestRequest, channel, new ThreadContext(Settings.EMPTY));\n- assertTrue(channel.sendResponseCalled.get());\n+ assertTrue(channel.getSendResponseCalled());\n \n assertWarnings(\"Content type detection for rest requests is deprecated. 
Specify the content type using the [Content-Type] header.\");\n \n // specified\n fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY)\n .withContent(YamlXContent.contentBuilder().startObject().endObject().bytes(), XContentType.YAML).withPath(\"/foo\").build();\n channel = new AssertingChannel(fakeRestRequest, true, RestStatus.NOT_ACCEPTABLE);\n- assertFalse(channel.sendResponseCalled.get());\n+ assertFalse(channel.getSendResponseCalled());\n restController.dispatchRequest(fakeRestRequest, channel, new ThreadContext(Settings.EMPTY));\n- assertTrue(channel.sendResponseCalled.get());\n+ assertTrue(channel.getSendResponseCalled());\n }\n \n public void testStrictModeContentStream() {\n@@ -432,9 +434,9 @@ public boolean supportsContentStream() {\n return true;\n }\n });\n- assertFalse(channel.sendResponseCalled.get());\n+ assertFalse(channel.getSendResponseCalled());\n restController.dispatchRequest(fakeRestRequest, channel, new ThreadContext(Settings.EMPTY));\n- assertTrue(channel.sendResponseCalled.get());\n+ assertTrue(channel.getSendResponseCalled());\n }\n \n public void testUnknownContentWithContentStream() {\n@@ -454,12 +456,32 @@ public boolean supportsContentStream() {\n return true;\n }\n });\n- assertFalse(channel.sendResponseCalled.get());\n+ assertFalse(channel.getSendResponseCalled());\n restController.dispatchRequest(fakeRestRequest, channel, new ThreadContext(Settings.EMPTY));\n- assertTrue(channel.sendResponseCalled.get());\n+ assertTrue(channel.getSendResponseCalled());\n assertWarnings(\"Content type detection for rest requests is deprecated. Specify the content type using the [Content-Type] header.\");\n }\n \n+ public void testDispatchBadRequest() {\n+ final FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).build();\n+ final AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.BAD_REQUEST);\n+ restController.dispatchBadRequest(\n+ fakeRestRequest,\n+ channel,\n+ new ThreadContext(Settings.EMPTY),\n+ randomBoolean() ? 
new IllegalStateException(\"bad request\") : new Throwable(\"bad request\"));\n+ assertTrue(channel.getSendResponseCalled());\n+ assertThat(channel.getRestResponse().content().utf8ToString(), containsString(\"bad request\"));\n+ }\n+\n+ public void testDispatchBadRequestUnknownCause() {\n+ final FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).build();\n+ final AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.BAD_REQUEST);\n+ restController.dispatchBadRequest(fakeRestRequest, channel, new ThreadContext(Settings.EMPTY), null);\n+ assertTrue(channel.getSendResponseCalled());\n+ assertThat(channel.getRestResponse().content().utf8ToString(), containsString(\"unknown cause\"));\n+ }\n+\n private static final class TestHttpServerTransport extends AbstractLifecycleComponent implements\n HttpServerTransport {\n \n@@ -497,8 +519,9 @@ public HttpStats stats() {\n }\n \n private static final class AssertingChannel extends AbstractRestChannel {\n+ \n private final RestStatus expectedStatus;\n- private final AtomicBoolean sendResponseCalled = new AtomicBoolean(false);\n+ private final AtomicReference<RestResponse> responseReference = new AtomicReference<>();\n \n protected AssertingChannel(RestRequest request, boolean detailedErrorsEnabled, RestStatus expectedStatus) {\n super(request, detailedErrorsEnabled);\n@@ -508,8 +531,17 @@ protected AssertingChannel(RestRequest request, boolean detailedErrorsEnabled, R\n @Override\n public void sendResponse(RestResponse response) {\n assertEquals(expectedStatus, response.status());\n- sendResponseCalled.set(true);\n+ responseReference.set(response);\n+ }\n+ \n+ RestResponse getRestResponse() {\n+ return responseReference.get();\n+ }\n+ \n+ boolean getSendResponseCalled() {\n+ return getRestResponse() != null;\n }\n+ \n }\n \n private static final class ExceptionThrowingChannel extends AbstractRestChannel {", "filename": "core/src/test/java/org/elasticsearch/rest/RestControllerTests.java", "status": "modified" }, { "diff": "@@ -25,7 +25,6 @@\n import io.netty.channel.SimpleChannelInboundHandler;\n import io.netty.handler.codec.http.DefaultFullHttpRequest;\n import io.netty.handler.codec.http.FullHttpRequest;\n-\n import org.elasticsearch.common.util.concurrent.ThreadContext;\n import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest;\n import org.elasticsearch.transport.netty4.Netty4Utils;\n@@ -65,11 +64,16 @@ protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Except\n Unpooled.copiedBuffer(request.content()),\n request.headers(),\n request.trailingHeaders());\n-\n final Netty4HttpRequest httpRequest = new Netty4HttpRequest(serverTransport.xContentRegistry, copy, ctx.channel());\n- serverTransport.dispatchRequest(\n- httpRequest,\n- new Netty4HttpChannel(serverTransport, httpRequest, pipelinedRequest, detailedErrorsEnabled, threadContext));\n+ final Netty4HttpChannel channel =\n+ new Netty4HttpChannel(serverTransport, httpRequest, pipelinedRequest, detailedErrorsEnabled, threadContext);\n+\n+ if (request.decoderResult().isSuccess()) {\n+ serverTransport.dispatchRequest(httpRequest, channel);\n+ } else {\n+ assert request.decoderResult().isFailure();\n+ serverTransport.dispatchBadRequest(httpRequest, channel, request.decoderResult().cause());\n+ }\n }\n \n @Override", "filename": "modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java", "status": "modified" }, { "diff": "@@ -496,8 +496,12 @@ public 
Netty4CorsConfig getCorsConfig() {\n return corsConfig;\n }\n \n- protected void dispatchRequest(RestRequest request, RestChannel channel) {\n- dispatcher.dispatch(request, channel, threadPool.getThreadContext());\n+ void dispatchRequest(final RestRequest request, final RestChannel channel) {\n+ dispatcher.dispatchRequest(request, channel, threadPool.getThreadContext());\n+ }\n+\n+ void dispatchBadRequest(final RestRequest request, final RestChannel channel, final Throwable cause) {\n+ dispatcher.dispatchBadRequest(request, channel, threadPool.getThreadContext(), cause);\n }\n \n protected void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {", "filename": "modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java", "status": "modified" }, { "diff": "@@ -47,6 +47,7 @@\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.util.MockBigArrays;\n import org.elasticsearch.http.HttpTransportSettings;\n+import org.elasticsearch.http.NullDispatcher;\n import org.elasticsearch.http.netty4.cors.Netty4CorsHandler;\n import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;\n import org.elasticsearch.rest.RestResponse;\n@@ -189,7 +190,7 @@ public void testHeadersSet() {\n Settings settings = Settings.builder().build();\n try (Netty4HttpServerTransport httpServerTransport =\n new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(),\n- (request, channel, context) -> {})) {\n+ new NullDispatcher())) {\n httpServerTransport.start();\n final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, \"/\");\n httpRequest.headers().add(HttpHeaderNames.ORIGIN, \"remote\");\n@@ -219,8 +220,7 @@ public void testHeadersSet() {\n public void testConnectionClose() throws Exception {\n final Settings settings = Settings.builder().build();\n try (Netty4HttpServerTransport httpServerTransport =\n- new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(),\n- (request, channel, context) -> {})) {\n+ new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(), new NullDispatcher())) {\n httpServerTransport.start();\n final FullHttpRequest httpRequest;\n final boolean close = randomBoolean();\n@@ -256,7 +256,7 @@ private FullHttpResponse executeRequest(final Settings settings, final String or\n // construct request and send it over the transport layer\n try (Netty4HttpServerTransport httpServerTransport =\n new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(),\n- (request, channel, context) -> {})) {\n+ new NullDispatcher())) {\n httpServerTransport.start();\n final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, \"/\");\n if (originValue != null) {", "filename": "modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java", "status": "modified" }, { "diff": "@@ -37,6 +37,7 @@\n import org.elasticsearch.common.util.MockBigArrays;\n import org.elasticsearch.common.util.concurrent.ThreadContext;\n import org.elasticsearch.http.HttpServerTransport;\n+import org.elasticsearch.http.NullDispatcher;\n import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest;\n import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;\n import org.elasticsearch.test.ESTestCase;\n@@ -160,7 +161,7 @@ class 
CustomNettyHttpServerTransport extends Netty4HttpServerTransport {\n Netty4HttpServerPipeliningTests.this.networkService,\n Netty4HttpServerPipeliningTests.this.bigArrays,\n Netty4HttpServerPipeliningTests.this.threadPool,\n- xContentRegistry(), (request, channel, context) -> {});\n+ xContentRegistry(), new NullDispatcher());\n }\n \n @Override", "filename": "modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java", "status": "modified" }, { "diff": "@@ -21,34 +21,47 @@\n \n import io.netty.buffer.ByteBufUtil;\n import io.netty.buffer.Unpooled;\n+import io.netty.handler.codec.TooLongFrameException;\n import io.netty.handler.codec.http.DefaultFullHttpRequest;\n import io.netty.handler.codec.http.FullHttpRequest;\n import io.netty.handler.codec.http.FullHttpResponse;\n import io.netty.handler.codec.http.HttpMethod;\n import io.netty.handler.codec.http.HttpResponseStatus;\n import io.netty.handler.codec.http.HttpUtil;\n import io.netty.handler.codec.http.HttpVersion;\n+import org.elasticsearch.ElasticsearchException;\n import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.bytes.BytesArray;\n import org.elasticsearch.common.network.NetworkService;\n+import org.elasticsearch.common.settings.Setting;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.transport.TransportAddress;\n+import org.elasticsearch.common.unit.ByteSizeValue;\n import org.elasticsearch.common.util.MockBigArrays;\n+import org.elasticsearch.common.util.concurrent.ThreadContext;\n import org.elasticsearch.http.BindHttpException;\n+import org.elasticsearch.http.HttpServerTransport;\n+import org.elasticsearch.http.HttpTransportSettings;\n+import org.elasticsearch.http.NullDispatcher;\n import org.elasticsearch.http.netty4.cors.Netty4CorsConfig;\n import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;\n import org.elasticsearch.rest.BytesRestResponse;\n+import org.elasticsearch.rest.RestChannel;\n+import org.elasticsearch.rest.RestRequest;\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.junit.After;\n import org.junit.Before;\n \n+import java.io.IOException;\n+import java.nio.charset.Charset;\n import java.nio.charset.StandardCharsets;\n import java.util.Arrays;\n import java.util.Collections;\n import java.util.HashSet;\n import java.util.Set;\n+import java.util.concurrent.atomic.AtomicReference;\n import java.util.stream.Collectors;\n \n import static org.elasticsearch.common.Strings.collectionToDelimitedString;\n@@ -58,7 +71,11 @@\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN;\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED;\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE;\n+import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;\n import static org.elasticsearch.rest.RestStatus.OK;\n+import static org.hamcrest.Matchers.containsString;\n+import static org.hamcrest.Matchers.equalTo;\n+import static org.hamcrest.Matchers.instanceOf;\n import static org.hamcrest.Matchers.is;\n \n /**\n@@ -122,9 +139,19 @@ public void testCorsConfigWithDefaults() {\n * Test that {@link Netty4HttpServerTransport} supports the \"Expect: 100-continue\" HTTP header\n */\n public void testExpectContinueHeader() throws Exception {\n+ final HttpServerTransport.Dispatcher dispatcher = new 
HttpServerTransport.Dispatcher() {\n+ @Override\n+ public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) {\n+ channel.sendResponse(new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, new BytesArray(\"done\")));\n+ }\n+\n+ @Override\n+ public void dispatchBadRequest(RestRequest request, RestChannel channel, ThreadContext threadContext, Throwable cause) {\n+ throw new AssertionError();\n+ }\n+ };\n try (Netty4HttpServerTransport transport = new Netty4HttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool,\n- xContentRegistry(), (request, channel, context) ->\n- channel.sendResponse(new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, new BytesArray(\"done\"))))) {\n+ xContentRegistry(), dispatcher)) {\n transport.start();\n TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());\n \n@@ -146,15 +173,73 @@ public void testExpectContinueHeader() throws Exception {\n \n public void testBindUnavailableAddress() {\n try (Netty4HttpServerTransport transport = new Netty4HttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool,\n- xContentRegistry(), (request, channel, context) -> {})) {\n+ xContentRegistry(), new NullDispatcher())) {\n transport.start();\n TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());\n Settings settings = Settings.builder().put(\"http.port\", remoteAddress.getPort()).build();\n try (Netty4HttpServerTransport otherTransport = new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool,\n- xContentRegistry(), (request, channel, context) -> {})) {\n+ xContentRegistry(), new NullDispatcher())) {\n BindHttpException bindHttpException = expectThrows(BindHttpException.class, () -> otherTransport.start());\n assertEquals(\"Failed to bind to [\" + remoteAddress.getPort() + \"]\", bindHttpException.getMessage());\n }\n }\n }\n+\n+ public void testBadRequest() throws InterruptedException {\n+ final AtomicReference<Throwable> causeReference = new AtomicReference<>();\n+ final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {\n+\n+ @Override\n+ public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {\n+ throw new AssertionError();\n+ }\n+\n+ @Override\n+ public void dispatchBadRequest(final RestRequest request,\n+ final RestChannel channel,\n+ final ThreadContext threadContext,\n+ final Throwable cause) {\n+ causeReference.set(cause);\n+ try {\n+ final ElasticsearchException e = new ElasticsearchException(\"you sent a bad request and you should feel bad\");\n+ channel.sendResponse(new BytesRestResponse(channel, BAD_REQUEST, e));\n+ } catch (final IOException e) {\n+ throw new AssertionError(e);\n+ }\n+ }\n+\n+ };\n+\n+ final Settings settings;\n+ final int maxInitialLineLength;\n+ final Setting<ByteSizeValue> httpMaxInitialLineLengthSetting = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH;\n+ if (randomBoolean()) {\n+ maxInitialLineLength = httpMaxInitialLineLengthSetting.getDefault(Settings.EMPTY).bytesAsInt();\n+ settings = Settings.EMPTY;\n+ } else {\n+ maxInitialLineLength = randomIntBetween(1, 8192);\n+ settings = Settings.builder().put(httpMaxInitialLineLengthSetting.getKey(), maxInitialLineLength + \"b\").build();\n+ }\n+\n+ try (Netty4HttpServerTransport transport =\n+ new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) {\n+ 
transport.start();\n+ final TransportAddress remoteAddress = randomFrom(transport.boundAddress.boundAddresses());\n+\n+ try (Netty4HttpClient client = new Netty4HttpClient()) {\n+ final String url = \"/\" + new String(new byte[maxInitialLineLength], Charset.forName(\"UTF-8\"));\n+ final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url);\n+\n+ final FullHttpResponse response = client.post(remoteAddress.address(), request);\n+ assertThat(response.status(), equalTo(HttpResponseStatus.BAD_REQUEST));\n+ assertThat(\n+ new String(response.content().array(), Charset.forName(\"UTF-8\")),\n+ containsString(\"you sent a bad request and you should feel bad\"));\n+ }\n+ }\n+\n+ assertNotNull(causeReference.get());\n+ assertThat(causeReference.get(), instanceOf(TooLongFrameException.class));\n+ }\n+\n }", "filename": "modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java", "status": "modified" }, { "diff": "@@ -0,0 +1,84 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.rest;\n+\n+import org.apache.http.util.EntityUtils;\n+import org.elasticsearch.client.Response;\n+import org.elasticsearch.client.ResponseException;\n+import org.elasticsearch.common.settings.Setting;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.unit.ByteSizeValue;\n+import org.elasticsearch.common.xcontent.XContentType;\n+import org.elasticsearch.http.HttpTransportSettings;\n+import org.elasticsearch.test.rest.ESRestTestCase;\n+import org.elasticsearch.test.rest.yaml.ObjectPath;\n+\n+import java.io.IOException;\n+import java.nio.charset.Charset;\n+import java.nio.charset.StandardCharsets;\n+import java.util.Collections;\n+import java.util.Map;\n+\n+import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;\n+import static org.elasticsearch.test.hamcrest.RegexMatcher.matches;\n+import static org.hamcrest.Matchers.containsString;\n+import static org.hamcrest.Matchers.equalTo;\n+import static org.hamcrest.Matchers.hasToString;\n+\n+public class Netty4BadRequestIT extends ESRestTestCase {\n+\n+ public void testBadRequest() throws IOException {\n+ final Response response = client().performRequest(\"GET\", \"/_nodes/settings\", Collections.emptyMap());\n+ final String body = EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8);\n+ final String contentType = response.getHeader(\"Content-Type\");\n+ final XContentType xContentType = XContentType.fromMediaTypeOrFormat(contentType);\n+ final ObjectPath objectPath = ObjectPath.createFromXContent(xContentType.xContent(), body);\n+ final Map<String, Object> map = objectPath.evaluate(\"nodes\");\n+ int maxMaxInitialLineLength = Integer.MIN_VALUE;\n+ 
final Setting<ByteSizeValue> httpMaxInitialLineLength = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH;\n+ final String key = httpMaxInitialLineLength.getKey().substring(\"http.\".length());\n+ for (Map.Entry<String, Object> entry : map.entrySet()) {\n+ @SuppressWarnings(\"unchecked\") final Map<String, Object> settings =\n+ (Map<String, Object>)((Map<String, Object>)entry.getValue()).get(\"settings\");\n+ final int maxIntialLineLength;\n+ if (settings.containsKey(\"http\")) {\n+ @SuppressWarnings(\"unchecked\") final Map<String, Object> httpSettings = (Map<String, Object>)settings.get(\"http\");\n+ if (httpSettings.containsKey(key)) {\n+ maxIntialLineLength = ByteSizeValue.parseBytesSizeValue((String)httpSettings.get(key), key).bytesAsInt();\n+ } else {\n+ maxIntialLineLength = httpMaxInitialLineLength.getDefault(Settings.EMPTY).bytesAsInt();\n+ }\n+ } else {\n+ maxIntialLineLength = httpMaxInitialLineLength.getDefault(Settings.EMPTY).bytesAsInt();\n+ }\n+ maxMaxInitialLineLength = Math.max(maxMaxInitialLineLength, maxIntialLineLength);\n+ }\n+\n+ final String path = \"/\" + new String(new byte[maxMaxInitialLineLength], Charset.forName(\"UTF-8\")).replace('\\0', 'a');\n+ final ResponseException e =\n+ expectThrows(\n+ ResponseException.class,\n+ () -> client().performRequest(randomFrom(\"GET\", \"POST\", \"PUT\"), path, Collections.emptyMap()));\n+ assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(BAD_REQUEST.getStatus()));\n+ assertThat(e, hasToString(containsString(\"too_long_frame_exception\")));\n+ assertThat(e, hasToString(matches(\"An HTTP line is larger than \\\\d+ bytes\")));\n+ }\n+\n+}", "filename": "modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java", "status": "added" }, { "diff": "@@ -0,0 +1,38 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.http;\n+\n+import org.elasticsearch.common.util.concurrent.ThreadContext;\n+import org.elasticsearch.rest.RestChannel;\n+import org.elasticsearch.rest.RestRequest;\n+\n+public class NullDispatcher implements HttpServerTransport.Dispatcher {\n+\n+ @Override\n+ public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) {\n+\n+ }\n+\n+ @Override\n+ public void dispatchBadRequest(RestRequest request, RestChannel channel, ThreadContext threadContext, Throwable cause) {\n+\n+ }\n+\n+}", "filename": "test/framework/src/main/java/org/elasticsearch/http/NullDispatcher.java", "status": "added" } ] }
{ "body": "After #21123 when Elasticsearch receive a HEAD request it returns the Content-Length of the that it would return for a GET request with an empty response body. Except in the document exists, index exists, and type exists requests which return 0. We should fix them to also return the Content-Length that would be in the response.\n", "comments": [ { "body": "I'm adding the v5.1.0 label too, I think we should target a fix there.\n", "created_at": "2016-10-26T05:16:19Z" }, { "body": "These are all addressed now. Closing.", "created_at": "2017-06-12T12:10:12Z" } ], "number": 21125, "title": "Some endpoints return Content-Length: 0 for HEAD requests" }
{ "body": "Get source HEAD requests incorrectly return a content-length header of\r\n0. This commit addresses this by removing the special handling for get\r\nsource HEAD requests, and just relying on the general mechanism that\r\nexists for handling HEAD requests in the REST layer.\r\n\r\nRelates #21125\r\n\r\n", "number": 23151, "review_comments": [ { "body": "the removed comment seemed quite important. Doesn't it apply anymore?", "created_at": "2017-02-17T15:26:21Z" }, { "body": "I don't think the comment makes any sense. What do you think it means?", "created_at": "2017-02-17T15:31:11Z" }, { "body": "it used to mean that things break if you set parent before routing. I wonder if that is still true though, probably in the past the parent setter was also setting the routing value.", "created_at": "2017-02-17T15:35:14Z" }, { "body": "I don't think it's true, that's why I don't think the comment makes sense. These are just boring field setters.", "created_at": "2017-02-17T15:50:37Z" }, { "body": "you are right, that comment doesn't apply anymore, the underlying request changed a a while ago to separate routing and parent. I updated some other rest actions that had that same comment for the same reason. https://github.com/elastic/elasticsearch/commit/578853f264e8ddae5c919b1b7ca35524cb292d62", "created_at": "2017-02-17T16:12:17Z" }, { "body": "Nice, thanks @javanna.", "created_at": "2017-02-17T16:17:46Z" } ], "title": "Fix get source HEAD requests" }
{ "commits": [ { "message": "Fix get source HEAD requests\n\nGet source HEAD requests incorrectly return a content-length header of\n0. This commit addresses this by removing the special handling for get\nsource HEAD requests, and just relying on the general mechanism that\nexists for handling HEAD requests in the REST layer." }, { "message": "Use status code constants" } ], "files": [ { "diff": "@@ -564,7 +564,6 @@ public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster) {\n registerHandler.accept(new RestGetAction(settings, restController));\n registerHandler.accept(new RestGetSourceAction(settings, restController));\n registerHandler.accept(new RestHeadAction.Document(settings, restController));\n- registerHandler.accept(new RestHeadAction.Source(settings, restController));\n registerHandler.accept(new RestMultiGetAction(settings, restController));\n registerHandler.accept(new RestDeleteAction(settings, restController));\n registerHandler.accept(new org.elasticsearch.rest.action.document.RestCountAction(settings, restController));", "filename": "core/src/main/java/org/elasticsearch/action/ActionModule.java", "status": "modified" }, { "diff": "@@ -36,21 +36,27 @@\n import java.io.IOException;\n \n import static org.elasticsearch.rest.RestRequest.Method.GET;\n+import static org.elasticsearch.rest.RestRequest.Method.HEAD;\n import static org.elasticsearch.rest.RestStatus.NOT_FOUND;\n import static org.elasticsearch.rest.RestStatus.OK;\n \n+/**\n+ * The REST handler for get source and head source APIs.\n+ */\n public class RestGetSourceAction extends BaseRestHandler {\n- public RestGetSourceAction(Settings settings, RestController controller) {\n+\n+ public RestGetSourceAction(final Settings settings, final RestController controller) {\n super(settings);\n controller.registerHandler(GET, \"/{index}/{type}/{id}/_source\", this);\n+ controller.registerHandler(HEAD, \"/{index}/{type}/{id}/_source\", this);\n }\n \n @Override\n public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {\n final GetRequest getRequest = new GetRequest(request.param(\"index\"), request.param(\"type\"), request.param(\"id\"));\n getRequest.operationThreaded(true);\n getRequest.refresh(request.paramAsBoolean(\"refresh\", getRequest.refresh()));\n- getRequest.routing(request.param(\"routing\")); // order is important, set it after routing, so it will set the routing\n+ getRequest.routing(request.param(\"routing\"));\n getRequest.parent(request.param(\"parent\"));\n getRequest.preference(request.param(\"preference\"));\n getRequest.realtime(request.paramAsBoolean(\"realtime\", getRequest.realtime()));\n@@ -59,15 +65,16 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC\n \n return channel -> {\n if (getRequest.fetchSourceContext() != null && !getRequest.fetchSourceContext().fetchSource()) {\n- ActionRequestValidationException validationError = new ActionRequestValidationException();\n+ final ActionRequestValidationException validationError = new ActionRequestValidationException();\n validationError.addValidationError(\"fetching source can not be disabled\");\n channel.sendResponse(new BytesRestResponse(channel, validationError));\n } else {\n client.get(getRequest, new RestResponseListener<GetResponse>(channel) {\n @Override\n- public RestResponse buildResponse(GetResponse response) throws Exception {\n- XContentBuilder builder = channel.newBuilder(request.getXContentType(), false);\n- if (response.isSourceEmpty()) { 
// check if doc source (or doc itself) is missing\n+ public RestResponse buildResponse(final GetResponse response) throws Exception {\n+ final XContentBuilder builder = channel.newBuilder(request.getXContentType(), false);\n+ // check if doc source (or doc itself) is missing\n+ if (response.isSourceEmpty()) {\n return new BytesRestResponse(NOT_FOUND, builder);\n } else {\n builder.rawValue(response.getSourceInternal());\n@@ -78,4 +85,5 @@ public RestResponse buildResponse(GetResponse response) throws Exception {\n }\n };\n }\n+\n }", "filename": "core/src/main/java/org/elasticsearch/rest/action/document/RestGetSourceAction.java", "status": "modified" }, { "diff": "@@ -48,33 +48,18 @@ public abstract class RestHeadAction extends BaseRestHandler {\n */\n public static class Document extends RestHeadAction {\n public Document(Settings settings, RestController controller) {\n- super(settings, false);\n+ super(settings);\n controller.registerHandler(HEAD, \"/{index}/{type}/{id}\", this);\n }\n }\n \n- /**\n- * Handler to check for document source existence (may be disabled in the mapping).\n- */\n- public static class Source extends RestHeadAction {\n- public Source(Settings settings, RestController controller) {\n- super(settings, true);\n- controller.registerHandler(HEAD, \"/{index}/{type}/{id}/_source\", this);\n- }\n- }\n-\n- private final boolean source;\n-\n /**\n * All subclasses must be registered in {@link org.elasticsearch.common.network.NetworkModule}.\n+ * @param settings injected settings\n *\n- * @param settings injected settings\n- * @param source {@code false} to check for {@link GetResponse#isExists()}.\n- * {@code true} to also check for {@link GetResponse#isSourceEmpty()}.\n */\n- public RestHeadAction(Settings settings, boolean source) {\n+ public RestHeadAction(Settings settings) {\n super(settings);\n- this.source = source;\n }\n \n @Override\n@@ -95,8 +80,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC\n public RestResponse buildResponse(GetResponse response) {\n if (!response.isExists()) {\n return new BytesRestResponse(NOT_FOUND, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY);\n- } else if (source && response.isSourceEmpty()) { // doc exists, but source might not (disabled in the mapping)\n- return new BytesRestResponse(NOT_FOUND, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY);\n } else {\n return new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY);\n }", "filename": "core/src/main/java/org/elasticsearch/rest/action/document/RestHeadAction.java", "status": "modified" }, { "diff": "@@ -31,6 +31,9 @@\n import static java.util.Collections.emptyMap;\n import static java.util.Collections.singletonMap;\n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n+import static org.elasticsearch.rest.RestStatus.NOT_FOUND;\n+import static org.elasticsearch.rest.RestStatus.OK;\n+import static org.hamcrest.Matchers.empty;\n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.greaterThan;\n \n@@ -43,7 +46,18 @@ public void testHeadRoot() throws IOException {\n }\n \n private void createTestDoc() throws IOException {\n- client().performRequest(\"PUT\", \"test/test/1\", emptyMap(), new StringEntity(\"{\\\"test\\\": \\\"test\\\"}\"));\n+ createTestDoc(\"test\", \"test\");\n+ }\n+\n+ private void createTestDoc(final String indexName, final String typeName) throws IOException {\n+ try (XContentBuilder builder = jsonBuilder()) {\n+ 
builder.startObject();\n+ {\n+ builder.field(\"test\", \"test\");\n+ }\n+ builder.endObject();\n+ client().performRequest(\"PUT\", \"/\" + indexName + \"/\" + typeName + \"/\" + \"1\", emptyMap(), new StringEntity(builder.string()));\n+ }\n }\n \n public void testDocumentExists() throws IOException {\n@@ -110,9 +124,46 @@ public void testTemplateExists() throws IOException {\n }\n }\n \n- private void headTestCase(String url, Map<String, String> params, Matcher<Integer> matcher) throws IOException {\n+ public void testGetSourceAction() throws IOException {\n+ createTestDoc();\n+ headTestCase(\"/test/test/1/_source\", emptyMap(), greaterThan(0));\n+ headTestCase(\"/test/test/2/_source\", emptyMap(), NOT_FOUND.getStatus(), equalTo(0));\n+\n+ try (XContentBuilder builder = jsonBuilder()) {\n+ builder.startObject();\n+ {\n+ builder.startObject(\"mappings\");\n+ {\n+ builder.startObject(\"test-no-source\");\n+ {\n+ builder.startObject(\"_source\");\n+ {\n+ builder.field(\"enabled\", false);\n+ }\n+ builder.endObject();\n+ }\n+ builder.endObject();\n+ }\n+ builder.endObject();\n+ }\n+ builder.endObject();\n+ client().performRequest(\"PUT\", \"/test-no-source\", emptyMap(), new StringEntity(builder.string()));\n+ createTestDoc(\"test-no-source\", \"test-no-source\");\n+ headTestCase(\"/test-no-source/test-no-source/1/_source\", emptyMap(), NOT_FOUND.getStatus(), equalTo(0));\n+ }\n+ }\n+\n+ private void headTestCase(final String url, final Map<String, String> params, final Matcher<Integer> matcher) throws IOException {\n+ headTestCase(url, params, OK.getStatus(), matcher);\n+ }\n+\n+ private void headTestCase(\n+ final String url,\n+ final Map<String, String> params,\n+ final int expectedStatusCode,\n+ final Matcher<Integer> matcher) throws IOException {\n Response response = client().performRequest(\"HEAD\", url, params);\n- assertEquals(200, response.getStatusLine().getStatusCode());\n+ assertEquals(expectedStatusCode, response.getStatusLine().getStatusCode());\n assertThat(Integer.valueOf(response.getHeader(\"Content-Length\")), matcher);\n assertNull(\"HEAD requests shouldn't have a response body but \" + url + \" did\", response.getEntity());\n }", "filename": "modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4HeadBodyIsEmptyIT.java", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: 5.2.0\r\n\r\nAs of 5.2.0, using an object syntax for `exclude` in a terms aggregation will result in a 400 `illegal_argument_exception` error. The exact same query works in 5.1.2.\r\n\r\nExample _msearch request body (taken directly from Kibana):\r\n```\r\n{\"index\":[\"logstash-0\"],\"ignore_unavailable\":true,\"preference\":1486048204582}\r\n{\"size\":0,\"query\":{\"bool\":{\"must\":[{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}},{\"range\":{\"@timestamp\":{\"gte\":1486047452495,\"lte\":1486048352495,\"format\":\"epoch_millis\"}}}],\"must_not\":[]}},\"_source\":{\"excludes\":[]},\"aggs\":{\"2\":{\"terms\":{\"field\":\"referer\",\"exclude\":{\"pattern\":\"apollo\"},\"size\":5,\"order\":{\"_count\":\"desc\"}}}}}\r\n```\r\n\r\nResponse body:\r\n```\r\n{\"error\":{\"root_cause\":[{\"type\":\"illegal_argument_exception\",\"reason\":\"[terms] exclude doesn't support values of type: START_OBJECT\"}],\"type\":\"illegal_argument_exception\",\"reason\":\"[terms] exclude doesn't support values of type: START_OBJECT\"},\"status\":400}\r\n```\r\n\r\nThis was originally filed against Kibana: https://github.com/elastic/kibana/issues/10153\r\n\r\n@javanna believes it may have been caused by https://github.com/elastic/elasticsearch/pull/22048", "comments": [ { "body": "Good catch. It seems that this `exclude/pattern` syntax has been undocumented since 2.0, causing it to not be tested, but in the meantime we did not deprecate it either. I think we should add it back to 5.x to not break users in a minor release with a deprecation notice recommending to just do `exclude: apollo` instead.", "created_at": "2017-02-02T15:54:02Z" }, { "body": "++", "created_at": "2017-02-02T15:56:54Z" }, { "body": "removing exclude/include patterns is not fix. Are we getting any fix for this ? ", "created_at": "2017-02-12T02:52:17Z" }, { "body": "@Guruindya The proposal wasn't to remove the feature entirely but to remove the older, outdated syntax that causes this issue. If you use the exclude/include syntax that you see in the docs right now, that should be fine.", "created_at": "2017-02-12T02:59:38Z" }, { "body": "Is there any workaround available? Some of our vizualisations are using it, causing whole dashboards in kibana to break.", "created_at": "2017-02-13T08:10:14Z" }, { "body": "+1 , if I upgrade my \"client\" nodes to 5.2, almost all our visualization breaks. The whole cluster is in 5.2 now, except the 2 clients nodes (the ones used by Kibana), which are stuck in 5.0. Also kibana can't be upgraded in 5.2 (as it will fail because some of the cluster's node are in 5.0).", "created_at": "2017-02-13T08:16:59Z" }, { "body": "please consider releasing a urgent update to elasticsearch or kibana to fix this issue, we can not downgrade ES nor kibana and now we are stuck with broken visualization/dashboards. Having a visible problem like this for a long time will for sure generate a lot noise", "created_at": "2017-02-13T19:09:17Z" }, { "body": "@epixa \r\n\r\n> If you use the exclude/include syntax that you see in the docs right now, that should be fine.\r\n\r\nHow can i do that in kibana? From what i understand from the [kibana bug](https://github.com/elastic/kibana/issues/10153) , there is no workaround in kibana 5.2.0", "created_at": "2017-02-13T19:12:20Z" }, { "body": "Is there any ETA for this fix ? 
So that we can plan accordingly ", "created_at": "2017-02-14T12:42:10Z" }, { "body": "@Guruindya sorry, but we do not announce release dates ahead of time", "created_at": "2017-02-14T12:45:44Z" }, { "body": "I have just updated my cluster to v5.2 and hit the same issue with the include / exclude patterns breaking a lot of visualisations so I would like to know if there is a 'proper' way to use the include / exclude filters and where they are documented and kept up to date. \r\n\r\nOne of the things I have struggled a lot with in the past is the lack of documentation for some parts of the ES stack so if there is a proper place to hunt this down then please someone point me there.", "created_at": "2017-02-17T17:02:11Z" }, { "body": "A fix for this has just been released in 5.2.2 for both Kibana and Elasticsearch.", "created_at": "2017-02-28T17:05:52Z" } ], "number": 22933, "title": "Include and exclude patterns in visualizations throw an error" }
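As the maintainers point out above, only the nesting changed, not the regex support itself. A sketch of the working flat form sent through the low-level REST client (the index and field names come from the report; the class name and client setup are illustrative):

```
import java.util.Collections;

import org.apache.http.HttpHost;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class FlatIncludeExcludeExample {

    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // flat form: the regex sits directly under "exclude" -- accepted by every 5.x release
            final String body = "{\"size\":0,\"aggs\":{\"2\":{\"terms\":{"
                    + "\"field\":\"referer\",\"exclude\":\"apollo\",\"size\":5}}}}";
            final Response response = client.performRequest(
                    "POST", "/logstash-0/_search", Collections.emptyMap(),
                    new NStringEntity(body, ContentType.APPLICATION_JSON));
            System.out.println(response.getStatusLine());
            // the nested form, "exclude":{"pattern":"apollo"}, is what 5.2.0 rejects with
            // "[terms] exclude doesn't support values of type: START_OBJECT"
        }
    }

}
```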
{ "body": "Relates #22933", "number": 23141, "review_comments": [], "title": "Remove support for the include/pattern syntax." }
{ "commits": [ { "message": "Remove support for the include/pattern syntax.\n\nRelates #22933" } ], "files": [ { "diff": "@@ -61,7 +61,6 @@\n public class IncludeExclude implements Writeable, ToXContent {\n public static final ParseField INCLUDE_FIELD = new ParseField(\"include\");\n public static final ParseField EXCLUDE_FIELD = new ParseField(\"exclude\");\n- public static final ParseField PATTERN_FIELD = new ParseField(\"pattern\");\n public static final ParseField PARTITION_FIELD = new ParseField(\"partition\");\n public static final ParseField NUM_PARTITIONS_FIELD = new ParseField(\"num_partitions\");\n // Needed to add this seed for a deterministic term hashing policy\n@@ -106,11 +105,6 @@ public static IncludeExclude parseInclude(XContentParser parser, QueryParseConte\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {\n currentFieldName = parser.currentName();\n- } else\n- // This \"include\":{\"pattern\":\"foo.*\"} syntax is undocumented since 2.0\n- // Regexes should be \"include\":\"foo.*\"\n- if (PATTERN_FIELD.match(currentFieldName)) {\n- return new IncludeExclude(parser.text(), null);\n } else if (NUM_PARTITIONS_FIELD.match(currentFieldName)) {\n numPartitions = parser.intValue();\n } else if (PARTITION_FIELD.match(currentFieldName)) {", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java", "status": "modified" }, { "diff": "@@ -28,6 +28,7 @@ way to reindex old indices is to use the `reindex` API.\n * <<breaking_60_stats_changes>>\n * <<breaking_60_rest_changes>>\n * <<breaking_60_search_changes>>\n+* <<breaking_60_aggregations_changes>>\n * <<breaking_60_mappings_changes>>\n * <<breaking_60_docs_changes>>\n * <<breaking_60_cluster_changes>>\n@@ -46,6 +47,8 @@ include::migrate_6_0/rest.asciidoc[]\n \n include::migrate_6_0/search.asciidoc[]\n \n+include::migrate_6_0/aggregations.asciidoc[]\n+\n include::migrate_6_0/mappings.asciidoc[]\n \n include::migrate_6_0/docs.asciidoc[]", "filename": "docs/reference/migration/migrate_6_0.asciidoc", "status": "modified" }, { "diff": "@@ -0,0 +1,51 @@\n+[[breaking_60_aggregations_changes]]\n+=== Aggregations changes\n+\n+==== Deprecated `pattern` element of include/exclude for terms aggregations has been removed\n+\n+The `include` and `exclude` options of `terms` aggregations used to accept a\n+sub `pattern` object which has been removed. The pattern should now be directly\n+put as a value of the `include` and `exclude` fields. For instance, the below\n+`terms` aggregation:\n+\n+[source,js]\n+--------------------------------------------------\n+POST /twitter/_search?size=0\n+{\n+ \"aggs\" : {\n+ \"top_users\" : {\n+ \"terms\" : {\n+ \"field\" : \"user\",\n+ \"include\": {\n+ \"pattern\": \"foo.*\"\n+ },\n+ \"exclude\": {\n+ \"pattern\": \".*bar\"\n+ }\n+ }\n+ }\n+ }\n+}\n+--------------------------------------------------\n+// CONSOLE\n+// TEST[skip: uses old unsupported syntax]\n+\n+should be replaced with:\n+\n+[source,js]\n+--------------------------------------------------\n+POST /twitter/_search?size=0\n+{\n+ \"aggs\" : {\n+ \"top_users\" : {\n+ \"terms\" : {\n+ \"field\" : \"user\",\n+ \"include\": \"foo.*\",\n+ \"exclude\": \".*bar\"\n+ }\n+ }\n+ }\n+}\n+--------------------------------------------------\n+// CONSOLE\n+// TEST[setup:twitter]", "filename": "docs/reference/migration/migrate_6_0/aggregations.asciidoc", "status": "added" } ] }
{ "body": "**Elasticsearch version**: 5.2.0\r\n\r\nAs of 5.2.0, using an object syntax for `exclude` in a terms aggregation will result in a 400 `illegal_argument_exception` error. The exact same query works in 5.1.2.\r\n\r\nExample _msearch request body (taken directly from Kibana):\r\n```\r\n{\"index\":[\"logstash-0\"],\"ignore_unavailable\":true,\"preference\":1486048204582}\r\n{\"size\":0,\"query\":{\"bool\":{\"must\":[{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}},{\"range\":{\"@timestamp\":{\"gte\":1486047452495,\"lte\":1486048352495,\"format\":\"epoch_millis\"}}}],\"must_not\":[]}},\"_source\":{\"excludes\":[]},\"aggs\":{\"2\":{\"terms\":{\"field\":\"referer\",\"exclude\":{\"pattern\":\"apollo\"},\"size\":5,\"order\":{\"_count\":\"desc\"}}}}}\r\n```\r\n\r\nResponse body:\r\n```\r\n{\"error\":{\"root_cause\":[{\"type\":\"illegal_argument_exception\",\"reason\":\"[terms] exclude doesn't support values of type: START_OBJECT\"}],\"type\":\"illegal_argument_exception\",\"reason\":\"[terms] exclude doesn't support values of type: START_OBJECT\"},\"status\":400}\r\n```\r\n\r\nThis was originally filed against Kibana: https://github.com/elastic/kibana/issues/10153\r\n\r\n@javanna believes it may have been caused by https://github.com/elastic/elasticsearch/pull/22048", "comments": [ { "body": "Good catch. It seems that this `exclude/pattern` syntax has been undocumented since 2.0, causing it to not be tested, but in the meantime we did not deprecate it either. I think we should add it back to 5.x to not break users in a minor release with a deprecation notice recommending to just do `exclude: apollo` instead.", "created_at": "2017-02-02T15:54:02Z" }, { "body": "++", "created_at": "2017-02-02T15:56:54Z" }, { "body": "removing exclude/include patterns is not fix. Are we getting any fix for this ? ", "created_at": "2017-02-12T02:52:17Z" }, { "body": "@Guruindya The proposal wasn't to remove the feature entirely but to remove the older, outdated syntax that causes this issue. If you use the exclude/include syntax that you see in the docs right now, that should be fine.", "created_at": "2017-02-12T02:59:38Z" }, { "body": "Is there any workaround available? Some of our vizualisations are using it, causing whole dashboards in kibana to break.", "created_at": "2017-02-13T08:10:14Z" }, { "body": "+1 , if I upgrade my \"client\" nodes to 5.2, almost all our visualization breaks. The whole cluster is in 5.2 now, except the 2 clients nodes (the ones used by Kibana), which are stuck in 5.0. Also kibana can't be upgraded in 5.2 (as it will fail because some of the cluster's node are in 5.0).", "created_at": "2017-02-13T08:16:59Z" }, { "body": "please consider releasing a urgent update to elasticsearch or kibana to fix this issue, we can not downgrade ES nor kibana and now we are stuck with broken visualization/dashboards. Having a visible problem like this for a long time will for sure generate a lot noise", "created_at": "2017-02-13T19:09:17Z" }, { "body": "@epixa \r\n\r\n> If you use the exclude/include syntax that you see in the docs right now, that should be fine.\r\n\r\nHow can i do that in kibana? From what i understand from the [kibana bug](https://github.com/elastic/kibana/issues/10153) , there is no workaround in kibana 5.2.0", "created_at": "2017-02-13T19:12:20Z" }, { "body": "Is there any ETA for this fix ? 
So that we can plan accordingly ", "created_at": "2017-02-14T12:42:10Z" }, { "body": "@Guruindya sorry, but we do not announce release dates ahead of time", "created_at": "2017-02-14T12:45:44Z" }, { "body": "I have just updated my cluster to v5.2 and hit the same issue with the include / exclude patterns breaking a lot of visualisations so I would like to know if there is a 'proper' way to use the include / exclude filters and where they are documented and kept up to date. \r\n\r\nOne of the things I have struggled a lot with in the past is the lack of documentation for some parts of the ES stack so if there is a proper place to hunt this down then please someone point me there.", "created_at": "2017-02-17T17:02:11Z" }, { "body": "A fix for this has just been released in 5.2.2 for both Kibana and Elasticsearch.", "created_at": "2017-02-28T17:05:52Z" } ], "number": 22933, "title": "Include and exclude patterns in visualizations throw an error" }
{ "body": "Closes #22933", "number": 23140, "review_comments": [ { "body": "Maybe throw an exception here if partition criteria supplied?\r\nPartitioning not supported when using regex-based filtering.", "created_at": "2017-02-15T09:35:34Z" }, { "body": "good point", "created_at": "2017-02-15T09:45:25Z" } ], "title": "Restore support for the `include/pattern` syntax." }
{ "commits": [ { "message": "Restore support for the `include/pattern` syntax.\n\nCloses #22933" }, { "message": "Make tests pass." }, { "message": "iter" }, { "message": "Prevent mixed pattern and partitions in includes." } ], "files": [ { "diff": "@@ -85,7 +85,7 @@ public static Aggregator.Parser getParser(ParseFieldRegistry<SignificanceHeurist\n IncludeExclude::parseInclude, IncludeExclude.INCLUDE_FIELD, ObjectParser.ValueType.OBJECT_ARRAY_OR_STRING);\n \n parser.declareField((b, v) -> b.includeExclude(IncludeExclude.merge(b.includeExclude(), v)),\n- IncludeExclude::parseExclude, IncludeExclude.EXCLUDE_FIELD, ObjectParser.ValueType.STRING_ARRAY);\n+ IncludeExclude::parseExclude, IncludeExclude.EXCLUDE_FIELD, ObjectParser.ValueType.OBJECT_ARRAY_OR_STRING);\n \n for (String name : significanceHeuristicParserRegistry.getNames()) {\n parser.declareObject(SignificantTermsAggregationBuilder::significanceHeuristic,", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -89,7 +89,7 @@ public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder<Valu\n IncludeExclude::parseInclude, IncludeExclude.INCLUDE_FIELD, ObjectParser.ValueType.OBJECT_ARRAY_OR_STRING);\n \n PARSER.declareField((b, v) -> b.includeExclude(IncludeExclude.merge(b.includeExclude(), v)),\n- IncludeExclude::parseExclude, IncludeExclude.EXCLUDE_FIELD, ObjectParser.ValueType.STRING_ARRAY);\n+ IncludeExclude::parseExclude, IncludeExclude.EXCLUDE_FIELD, ObjectParser.ValueType.OBJECT_ARRAY_OR_STRING);\n }\n \n public static AggregationBuilder parse(String aggregationName, QueryParseContext context) throws IOException {", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -44,7 +44,6 @@\n import org.elasticsearch.common.xcontent.ToXContent;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentParser;\n-import org.elasticsearch.index.query.QueryParseContext;\n import org.elasticsearch.search.DocValueFormat;\n \n import java.io.IOException;\n@@ -61,7 +60,7 @@\n public class IncludeExclude implements Writeable, ToXContent {\n public static final ParseField INCLUDE_FIELD = new ParseField(\"include\");\n public static final ParseField EXCLUDE_FIELD = new ParseField(\"exclude\");\n- public static final ParseField PATTERN_FIELD = new ParseField(\"pattern\");\n+ public static final ParseField PATTERN_FIELD = new ParseField(\"pattern\").withAllDeprecated(\"Put patterns directly under the [include] or [exclude]\");\n public static final ParseField PARTITION_FIELD = new ParseField(\"partition\");\n public static final ParseField NUM_PARTITIONS_FIELD = new ParseField(\"num_partitions\");\n // Needed to add this seed for a deterministic term hashing policy\n@@ -94,7 +93,7 @@ public static IncludeExclude merge(IncludeExclude include, IncludeExclude exclud\n }\n }\n \n- public static IncludeExclude parseInclude(XContentParser parser, QueryParseContext context) throws IOException {\n+ public static IncludeExclude parseInclude(XContentParser parser) throws IOException {\n XContentParser.Token token = parser.currentToken();\n if (token == XContentParser.Token.VALUE_STRING) {\n return new IncludeExclude(parser.text(), null);\n@@ -103,14 +102,15 @@ public static IncludeExclude parseInclude(XContentParser parser, QueryParseConte\n } else if (token == 
XContentParser.Token.START_OBJECT) {\n String currentFieldName = null;\n Integer partition = null, numPartitions = null;\n+ String pattern = null;\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {\n currentFieldName = parser.currentName();\n } else\n // This \"include\":{\"pattern\":\"foo.*\"} syntax is undocumented since 2.0\n // Regexes should be \"include\":\"foo.*\"\n if (PATTERN_FIELD.match(currentFieldName)) {\n- return new IncludeExclude(parser.text(), null);\n+ pattern = parser.text();\n } else if (NUM_PARTITIONS_FIELD.match(currentFieldName)) {\n numPartitions = parser.intValue();\n } else if (PARTITION_FIELD.match(currentFieldName)) {\n@@ -120,6 +120,17 @@ public static IncludeExclude parseInclude(XContentParser parser, QueryParseConte\n \"Unknown parameter in Include/Exclude clause: \" + currentFieldName);\n }\n }\n+\n+ final boolean hasPattern = pattern != null;\n+ final boolean hasPartition = partition != null || numPartitions != null;\n+ if (hasPattern && hasPartition) {\n+ throw new IllegalArgumentException(\"Cannot mix pattern-based and partition-based includes\");\n+ }\n+\n+ if (pattern != null) {\n+ return new IncludeExclude(pattern, null);\n+ }\n+\n if (partition == null) {\n throw new IllegalArgumentException(\"Missing [\" + PARTITION_FIELD.getPreferredName()\n + \"] parameter for partition-based include\");\n@@ -134,12 +145,28 @@ public static IncludeExclude parseInclude(XContentParser parser, QueryParseConte\n }\n }\n \n- public static IncludeExclude parseExclude(XContentParser parser, QueryParseContext context) throws IOException {\n+ public static IncludeExclude parseExclude(XContentParser parser) throws IOException {\n XContentParser.Token token = parser.currentToken();\n if (token == XContentParser.Token.VALUE_STRING) {\n return new IncludeExclude(null, parser.text());\n } else if (token == XContentParser.Token.START_ARRAY) {\n return new IncludeExclude(null, new TreeSet<>(parseArrayToSet(parser)));\n+ } else if (token == XContentParser.Token.START_OBJECT) {\n+ String currentFieldName = null;\n+ String pattern = null;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ currentFieldName = parser.currentName();\n+ } else if (PATTERN_FIELD.match(currentFieldName)) {\n+ pattern = parser.text();\n+ } else {\n+ throw new IllegalArgumentException(\"Unrecognized field [\" + parser.currentName() + \"]\");\n+ }\n+ }\n+ if (pattern == null) {\n+ throw new IllegalArgumentException(\"Missing [pattern] element under [exclude]\");\n+ }\n+ return new IncludeExclude(null, pattern);\n } else {\n throw new IllegalArgumentException(\"Unrecognized token for an exclude [\" + token + \"]\");\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java", "status": "modified" }, { "diff": "@@ -30,7 +30,7 @@\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.common.xcontent.XContentParser;\n import org.elasticsearch.common.xcontent.XContentType;\n-import org.elasticsearch.index.query.QueryParseContext;\n+import org.elasticsearch.common.xcontent.json.JsonXContent;\n import org.elasticsearch.search.DocValueFormat;\n import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;\n import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude.OrdinalsFilter;\n@@ -231,11 +231,10 @@ private IncludeExclude 
serialize(IncludeExclude incExc, ParseField field) throws\n assertEquals(field.getPreferredName(), parser.currentName());\n token = parser.nextToken();\n \n- QueryParseContext parseContext = new QueryParseContext(parser);\n if (field.getPreferredName().equalsIgnoreCase(\"include\")) {\n- return IncludeExclude.parseInclude(parser, parseContext);\n+ return IncludeExclude.parseInclude(parser);\n } else if (field.getPreferredName().equalsIgnoreCase(\"exclude\")) {\n- return IncludeExclude.parseExclude(parser, parseContext);\n+ return IncludeExclude.parseExclude(parser);\n } else {\n throw new IllegalArgumentException(\n \"Unexpected field name serialized in test: \" + field.getPreferredName());\n@@ -271,7 +270,6 @@ private IncludeExclude serializeMixedRegex(IncludeExclude incExc) throws IOExcep\n builder.endObject();\n \n XContentParser parser = createParser(builder);\n- QueryParseContext parseContext = new QueryParseContext(parser);\n XContentParser.Token token = parser.nextToken();\n assertEquals(token, XContentParser.Token.START_OBJECT);\n \n@@ -281,10 +279,10 @@ private IncludeExclude serializeMixedRegex(IncludeExclude incExc) throws IOExcep\n assertEquals(XContentParser.Token.FIELD_NAME, token);\n if (IncludeExclude.INCLUDE_FIELD.match(parser.currentName())) {\n token = parser.nextToken();\n- inc = IncludeExclude.parseInclude(parser, parseContext);\n+ inc = IncludeExclude.parseInclude(parser);\n } else if (IncludeExclude.EXCLUDE_FIELD.match(parser.currentName())) {\n token = parser.nextToken();\n- exc = IncludeExclude.parseExclude(parser, parseContext);\n+ exc = IncludeExclude.parseExclude(parser);\n } else {\n throw new IllegalArgumentException(\"Unexpected field name serialized in test: \" + parser.currentName());\n }\n@@ -295,4 +293,18 @@ private IncludeExclude serializeMixedRegex(IncludeExclude incExc) throws IOExcep\n return IncludeExclude.merge(inc, exc);\n }\n \n+ public void testMixRegexAndPartition() throws Exception {\n+ XContentBuilder builder = JsonXContent.contentBuilder()\n+ .startObject()\n+ .field(\"pattern\", \"a.*\")\n+ .field(\"partition\", 1)\n+ .field(\"num_partitions\", 3)\n+ .endObject();\n+ try (XContentParser parser = createParser(builder)) {\n+ parser.nextToken();\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> IncludeExclude.parseInclude(parser));\n+ assertEquals(\"Cannot mix pattern-based and partition-based includes\", e.getMessage());\n+ assertWarnings(\"Deprecated field [pattern] used, replaced by [Put patterns directly under the [include] or [exclude]]\");\n+ }\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/search/aggregations/support/IncludeExcludeTests.java", "status": "modified" }, { "diff": "@@ -667,3 +667,49 @@ setup:\n - match: { aggregations.double_terms.buckets.0.key: 3.5 }\n \n - match: { aggregations.double_terms.buckets.0.doc_count: 1 }\n+\n+---\n+\"Pattern include/exclude\":\n+\n+ - skip:\n+ features: \"warnings\"\n+\n+ - do:\n+ index:\n+ index: test_1\n+ type: test\n+ id: 1\n+ body: { \"str\" : \"abc\" }\n+\n+ - do:\n+ index:\n+ index: test_1\n+ type: test\n+ id: 2\n+ body: { \"str\": \"bcd\" }\n+\n+ - do:\n+ index:\n+ index: test_1\n+ type: test\n+ id: 3\n+ body: { \"str\": \"cde\" }\n+\n+ - do:\n+ indices.refresh: {}\n+\n+ - do:\n+ warnings:\n+ - \"Deprecated field [pattern] used, replaced by [Put patterns directly under the [include] or [exclude]]\"\n+ search:\n+ body: { \"size\" : 0, \"aggs\" : { \"str_terms\" : { \"terms\" : { \"field\" : \"str\", \"include\" : {\"pattern\": \".*d.*\"}, 
\"exclude\": { \"pattern\": \".*e.*\" } } } } }\n+\n+ - match: { hits.total: 3 }\n+\n+ - length: { aggregations.str_terms.buckets: 1 }\n+\n+ - match: { aggregations.str_terms.buckets.0.key: \"bcd\" }\n+\n+ - is_false: aggregations.str_terms.buckets.0.key_as_string\n+\n+ - match: { aggregations.str_terms.buckets.0.doc_count: 1 }", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml", "status": "modified" }, { "diff": "@@ -145,4 +145,68 @@\n search:\n body: { \"size\" : 0, \"aggs\" : { \"ip_terms\" : { \"significant_terms\" : { \"field\" : \"ip\", \"exclude\" : \"127.*\" } } } }\n \n- \n+---\n+\"Pattern include/exclude\":\n+\n+ - skip:\n+ features: \"warnings\"\n+\n+ - do:\n+ indices.create:\n+ index: test_1\n+ body:\n+ settings:\n+ number_of_shards: \"1\"\n+ mappings:\n+ test:\n+ properties:\n+ str:\n+ type: keyword\n+\n+ - do:\n+ index:\n+ index: test_1\n+ type: test\n+ id: 1\n+ body: { \"str\" : \"abc\" }\n+\n+ - do:\n+ index:\n+ index: test_1\n+ type: test\n+ id: 2\n+ body: { \"str\": \"bcd\" }\n+\n+ - do:\n+ index:\n+ index: test_1\n+ type: test\n+ id: 3\n+ body: { \"str\": \"cde\" }\n+\n+ - do:\n+ index:\n+ index: test_1\n+ type: test\n+ id: 4\n+ body: { }\n+\n+\n+ - do:\n+ indices.refresh: {}\n+\n+ - do:\n+ warnings:\n+ - \"Deprecated field [pattern] used, replaced by [Put patterns directly under the [include] or [exclude]]\"\n+ search:\n+ body: { \"size\" : 0, \"query\" : { \"exists\" : { \"field\" : \"str\" } }, \"aggs\" : { \"str_terms\" : { \"significant_terms\" : { \"field\" : \"str\", \"min_doc_count\": 1, \"include\" : {\"pattern\": \".*d.*\"}, \"exclude\": { \"pattern\": \".*e.*\" } } } } }\n+\n+ - match: { hits.total: 3 }\n+\n+ - length: { aggregations.str_terms.buckets: 1 }\n+\n+ - match: { aggregations.str_terms.buckets.0.key: \"bcd\" }\n+\n+ - is_false: aggregations.str_terms.buckets.0.key_as_string\n+\n+ - match: { aggregations.str_terms.buckets.0.doc_count: 1 }", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yaml", "status": "modified" } ] }
{ "body": "This pr should cause unicode elements in the location header to be percent-encoded, instead of being left alone.\n\nFor the cases mentioned by @weltenwort in #21016, they now return:\n\n```\ncurl -XPUT -v 'http://localhost:9200/someindex/sometype/%C3%A4' -d {}\n< HTTP/1.1 201 Created\n< Location: /someindex/sometype/%C3%A4\n< content-type: application/json; charset=UTF-8\n< content-length: 148\n< \n```\n\n```\ncurl -XPUT -v 'http://localhost:9200/someindex/sometype/%E2%9D%A4' -d '{}'\n< HTTP/1.1 201 Created\n< Location: /someindex/sometype/%E2%9D%A4\n< content-type: application/json; charset=UTF-8\n< content-length: 149\n< \n```\n\nThe above responses compare favorably with the responses from a checkout of current master:\n\n```\ncurl -XPUT -v 'http://localhost:9200/someindex/sometype/%C3%A4' -d {}\n< HTTP/1.1 201 Created\n< Location: /someindex/sometype/‰\n< content-type: application/json; charset=UTF-8\n< content-length: 147\n< \n```\n\n```\ncurl -XPUT -v 'http://localhost:9200/someindex/sometype/%E2%9D%A4' -d '{}'\n< HTTP/1.1 201 Created\n< Location: /someindex/sometype/?\n< content-type: application/json; charset=UTF-8\n< content-length: 149\n< \n```\n\nCloses #21016\n", "comments": [ { "body": "@jasontedor, @nik9000 I've collapsed the try/catch return statements into a single return statement, commented it and added a logging statement. I don't expect the exception case to be hit at all, as the Location header should contain a valid URI.\n\nThanks for the comments and reviews!\n", "created_at": "2016-10-22T06:27:51Z" }, { "body": "@nik9000 @jasontedor I've rebased onto latest master. Returning null from this method will cause the downstream consumer to not add the 'Location' header to the response. We expect to almost never return null.\n", "created_at": "2016-11-10T05:57:10Z" }, { "body": "@nik9000 I've made the change and added a test for the exception behavior.\n", "created_at": "2016-11-11T16:55:54Z" }, { "body": "@jasontedor Oops, sorry 'bout that. I've killed the logger.\nThanks for the catch!\n", "created_at": "2016-11-14T10:52:46Z" }, { "body": "@jasontedor @nik9000 rebased to master. Was there something else I should have fixed?", "created_at": "2016-12-14T06:20:59Z" }, { "body": "@nik9000 Wow, did not know that expectThrows was a thing. Nifty.", "created_at": "2016-12-15T06:55:34Z" }, { "body": "This looks good to me now.\r\n\r\nelasticmachine, please test this", "created_at": "2016-12-19T19:01:15Z" }, { "body": "Thanks @a2lin! Sorry it took me so long to review this. I've merged to master and I'll backport to 5.x so it should go out with 5.2.\r\n\r\nmaster: 0ab3cbe3a3b15a154c79138c6d0e6f8dfa888876\r\n5.x: 8e3d09d2ec34691d32d5daad8931da22b74ee995", "created_at": "2016-12-19T20:56:53Z" }, { "body": "Thanks for reviewing!", "created_at": "2016-12-20T04:57:04Z" } ], "number": 21057, "title": "Adds percent-encoding for Location headers" }
{ "body": "Today when trying to encode the location header to ASCII, we rely on the Java URI API. This API requires a proper URI which blows up whenever the URI contains, for example, a space (which can happen if the type, ID, or routing contain a space). This commit addresses this issue by properly encoding the URI. Additionally, we remove the need to create a URI simplifying the code flow.\r\n\r\nCloses #23115, relates #21057", "number": 23133, "review_comments": [ { "body": "nit: can you cap this at 72 columns?", "created_at": "2017-02-12T17:49:14Z" }, { "body": "I think it's odd that we would have a 140 character limit on Java source code but only 72 characters on Javadocs (and comments?). If you feel strongly about this, I think that we should discuss it more broadly.", "created_at": "2017-02-13T14:34:39Z" } ], "title": "Properly encode location header" }
{ "commits": [ { "message": "Properly encode location header\n\nToday when trying to encode the location header to ASCII, we rely on the\nJava URI API. This API requires a proper URI which blows up whenever the\nURI contains, for example, a space (which can happen if the type, ID, or\nrouting contain a space). This commit addresses this issue by properly\nencoding the URI. Additionally, we remove the need to create a URI\nsimplifying the code flow." }, { "message": "Remove unnecessary imports" } ], "files": [ { "diff": "@@ -38,8 +38,11 @@\n import org.elasticsearch.rest.RestStatus;\n \n import java.io.IOException;\n+import java.io.UnsupportedEncodingException;\n import java.net.URI;\n import java.net.URISyntaxException;\n+import java.net.URLEncoder;\n+import java.nio.charset.Charset;\n import java.util.Locale;\n \n import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;\n@@ -201,31 +204,43 @@ public RestStatus status() {\n }\n \n /**\n- * Gets the location of the written document as a string suitable for a {@code Location} header.\n- * @param routing any routing used in the request. If null the location doesn't include routing information.\n+ * Return the relative URI for the location of the document suitable for use in the {@code Location} header. The use of relative URIs is\n+ * permitted as of HTTP/1.1 (cf. https://tools.ietf.org/html/rfc7231#section-7.1.2).\n *\n+ * @param routing custom routing or {@code null} if custom routing is not used\n+ * @return the relative URI for the location of the document\n */\n- public String getLocation(@Nullable String routing) throws URISyntaxException {\n- // Absolute path for the location of the document. This should be allowed as of HTTP/1.1:\n- // https://tools.ietf.org/html/rfc7231#section-7.1.2\n- String index = getIndex();\n- String type = getType();\n- String id = getId();\n- String routingStart = \"?routing=\";\n- int bufferSize = 3 + index.length() + type.length() + id.length();\n- if (routing != null) {\n- bufferSize += routingStart.length() + routing.length();\n+ public String getLocation(@Nullable String routing) {\n+ final String encodedIndex;\n+ final String encodedType;\n+ final String encodedId;\n+ final String encodedRouting;\n+ try {\n+ // encode the path components separately otherwise the path separators will be encoded\n+ encodedIndex = URLEncoder.encode(getIndex(), \"UTF-8\");\n+ encodedType = URLEncoder.encode(getType(), \"UTF-8\");\n+ encodedId = URLEncoder.encode(getId(), \"UTF-8\");\n+ encodedRouting = routing == null ? 
null : URLEncoder.encode(routing, \"UTF-8\");\n+ } catch (final UnsupportedEncodingException e) {\n+ throw new AssertionError(e);\n }\n- StringBuilder location = new StringBuilder(bufferSize);\n- location.append('/').append(index);\n- location.append('/').append(type);\n- location.append('/').append(id);\n- if (routing != null) {\n- location.append(routingStart).append(routing);\n+ final String routingStart = \"?routing=\";\n+ final int bufferSizeExcludingRouting = 3 + encodedIndex.length() + encodedType.length() + encodedId.length();\n+ final int bufferSize;\n+ if (encodedRouting == null) {\n+ bufferSize = bufferSizeExcludingRouting;\n+ } else {\n+ bufferSize = bufferSizeExcludingRouting + routingStart.length() + encodedRouting.length();\n+ }\n+ final StringBuilder location = new StringBuilder(bufferSize);\n+ location.append('/').append(encodedIndex);\n+ location.append('/').append(encodedType);\n+ location.append('/').append(encodedId);\n+ if (encodedRouting != null) {\n+ location.append(routingStart).append(encodedRouting);\n }\n \n- URI uri = new URI(location.toString());\n- return uri.toASCIIString();\n+ return location.toString();\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/action/DocWriteResponse.java", "status": "modified" }, { "diff": "@@ -57,7 +57,7 @@ public RestResponse buildResponse(Response response, XContentBuilder builder) th\n response.toXContent(builder, channel.request());\n RestResponse restResponse = new BytesRestResponse(response.status(), builder);\n if (RestStatus.CREATED == restResponse.status()) {\n- String location = extractLocation.apply(response);\n+ final String location = extractLocation.apply(response);\n if (location != null) {\n restResponse.addHeader(\"Location\", location);\n }", "filename": "core/src/main/java/org/elasticsearch/rest/action/RestStatusToXContentListener.java", "status": "modified" }, { "diff": "@@ -31,7 +31,6 @@\n import org.elasticsearch.rest.action.RestStatusToXContentListener;\n \n import java.io.IOException;\n-import java.net.URISyntaxException;\n \n import static org.elasticsearch.rest.RestRequest.Method.POST;\n import static org.elasticsearch.rest.RestRequest.Method.PUT;\n@@ -80,14 +79,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC\n }\n \n return channel ->\n- client.index(indexRequest, new RestStatusToXContentListener<>(channel, r -> {\n- try {\n- return r.getLocation(indexRequest.routing());\n- } catch (URISyntaxException ex) {\n- logger.warn(\"Location string is not a valid URI.\", ex);\n- return null;\n- }\n- }));\n+ client.index(indexRequest, new RestStatusToXContentListener<>(channel, r -> r.getLocation(indexRequest.routing())));\n }\n \n }", "filename": "core/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java", "status": "modified" }, { "diff": "@@ -36,7 +36,6 @@\n import org.elasticsearch.search.fetch.subphase.FetchSourceContext;\n \n import java.io.IOException;\n-import java.net.URISyntaxException;\n \n import static org.elasticsearch.rest.RestRequest.Method.POST;\n \n@@ -98,13 +97,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC\n });\n \n return channel ->\n- client.update(updateRequest, new RestStatusToXContentListener<>(channel, r -> {\n- try {\n- return r.getLocation(updateRequest.routing());\n- } catch (URISyntaxException ex) {\n- logger.warn(\"Location string is not a valid URI.\", ex);\n- return null;\n- }\n- }));\n+ client.update(updateRequest, new RestStatusToXContentListener<>(channel, r 
-> r.getLocation(updateRequest.routing())));\n }\n+\n }", "filename": "core/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java", "status": "modified" }, { "diff": "@@ -30,55 +30,49 @@\n import org.elasticsearch.test.ESTestCase;\n \n import java.io.IOException;\n-import java.net.URISyntaxException;\n \n import static org.hamcrest.Matchers.hasEntry;\n import static org.hamcrest.Matchers.hasKey;\n import static org.hamcrest.Matchers.not;\n \n public class DocWriteResponseTests extends ESTestCase {\n- public void testGetLocation() throws URISyntaxException {\n- DocWriteResponse response =\n- new DocWriteResponse(\n- new ShardId(\"index\", \"uuid\", 0),\n- \"type\",\n- \"id\",\n- SequenceNumbersService.UNASSIGNED_SEQ_NO,\n- 0,\n- Result.CREATED) {\n- // DocWriteResponse is abstract so we have to sneak a subclass in here to test it.\n- };\n+ public void testGetLocation() {\n+ final DocWriteResponse response =\n+ new DocWriteResponse(\n+ new ShardId(\"index\", \"uuid\", 0),\n+ \"type\",\n+ \"id\",\n+ SequenceNumbersService.UNASSIGNED_SEQ_NO,\n+ 0,\n+ Result.CREATED) {};\n assertEquals(\"/index/type/id\", response.getLocation(null));\n assertEquals(\"/index/type/id?routing=test_routing\", response.getLocation(\"test_routing\"));\n }\n \n- public void testGetLocationNonAscii() throws URISyntaxException {\n- DocWriteResponse response =\n- new DocWriteResponse(\n- new ShardId(\"index\", \"uuid\", 0),\n- \"type\",\n- \"❤\",\n- SequenceNumbersService.UNASSIGNED_SEQ_NO,\n- 0,\n- Result.CREATED) {\n- };\n+ public void testGetLocationNonAscii() {\n+ final DocWriteResponse response =\n+ new DocWriteResponse(\n+ new ShardId(\"index\", \"uuid\", 0),\n+ \"type\",\n+ \"❤\",\n+ SequenceNumbersService.UNASSIGNED_SEQ_NO,\n+ 0,\n+ Result.CREATED) {};\n assertEquals(\"/index/type/%E2%9D%A4\", response.getLocation(null));\n- assertEquals(\"/index/type/%E2%9D%A4?routing=%C3%A4\", response.getLocation(\"%C3%A4\"));\n+ assertEquals(\"/index/type/%E2%9D%A4?routing=%C3%A4\", response.getLocation(\"ä\"));\n }\n \n- public void testInvalidGetLocation() {\n- String invalidPath = \"!^*$(@!^!#@\";\n- DocWriteResponse invalid =\n- new DocWriteResponse(\n- new ShardId(\"index\", \"uuid\", 0),\n- \"type\",\n- invalidPath,\n- SequenceNumbersService.UNASSIGNED_SEQ_NO,\n- 0,\n- Result.CREATED) {\n- };\n- Throwable exception = expectThrows(URISyntaxException.class, () -> invalid.getLocation(null));\n- assertTrue(exception.getMessage().contains(invalidPath));\n+ public void testGetLocationWithSpaces() {\n+ final DocWriteResponse response =\n+ new DocWriteResponse(\n+ new ShardId(\"index\", \"uuid\", 0),\n+ \"type\",\n+ \"a b\",\n+ SequenceNumbersService.UNASSIGNED_SEQ_NO,\n+ 0,\n+ Result.CREATED) {};\n+ assertEquals(\"/index/type/a+b\", response.getLocation(null));\n+ assertEquals(\"/index/type/a+b?routing=c+d\", response.getLocation(\"c d\"));\n }\n \n /**", "filename": "core/src/test/java/org/elasticsearch/action/DocWriteResponseTests.java", "status": "modified" } ] }
{ "body": "After #21123 when Elasticsearch receive a HEAD request it returns the Content-Length of the that it would return for a GET request with an empty response body. Except in the document exists, index exists, and type exists requests which return 0. We should fix them to also return the Content-Length that would be in the response.\n", "comments": [ { "body": "I'm adding the v5.1.0 label too, I think we should target a fix there.\n", "created_at": "2016-10-26T05:16:19Z" }, { "body": "These are all addressed now. Closing.", "created_at": "2017-06-12T12:10:12Z" } ], "number": 21125, "title": "Some endpoints return Content-Length: 0 for HEAD requests" }
{ "body": "Template HEAD requests incorrectly return a content-length header of 0. This commit addresses this by removing the special handling for template HEAD requests, and just relying on the general mechanism that exists for handling HEAD requests in the REST layer.\r\n\r\nRelates #21125\r\n\r\n", "number": 23130, "review_comments": [], "title": "Fix template HEAD requests" }
{ "commits": [ { "message": "Fix template HEAD requests\n\nTemplate HEAD requests incorrectly return a content-length header of\n0. This commit addresses this by removing the special handling for\ntemplate HEAD requests, and just relying on the general mechanism that\nexists for handling HEAD requests in the REST layer." } ], "files": [ { "diff": "@@ -248,7 +248,6 @@\n import org.elasticsearch.rest.action.admin.indices.RestGetIndicesAction;\n import org.elasticsearch.rest.action.admin.indices.RestGetMappingAction;\n import org.elasticsearch.rest.action.admin.indices.RestGetSettingsAction;\n-import org.elasticsearch.rest.action.admin.indices.RestHeadIndexTemplateAction;\n import org.elasticsearch.rest.action.admin.indices.RestIndexDeleteAliasesAction;\n import org.elasticsearch.rest.action.admin.indices.RestIndexPutAliasAction;\n import org.elasticsearch.rest.action.admin.indices.RestIndicesAliasesAction;\n@@ -549,7 +548,6 @@ public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster) {\n registerHandler.accept(new RestGetIndexTemplateAction(settings, restController));\n registerHandler.accept(new RestPutIndexTemplateAction(settings, restController));\n registerHandler.accept(new RestDeleteIndexTemplateAction(settings, restController));\n- registerHandler.accept(new RestHeadIndexTemplateAction(settings, restController));\n \n registerHandler.accept(new RestPutMappingAction(settings, restController));\n registerHandler.accept(new RestGetMappingAction(settings, restController));", "filename": "core/src/main/java/org/elasticsearch/action/ActionModule.java", "status": "modified" }, { "diff": "@@ -34,22 +34,27 @@\n import java.util.Set;\n \n import static org.elasticsearch.rest.RestRequest.Method.GET;\n+import static org.elasticsearch.rest.RestRequest.Method.HEAD;\n import static org.elasticsearch.rest.RestStatus.NOT_FOUND;\n import static org.elasticsearch.rest.RestStatus.OK;\n \n+/**\n+ * The REST handler for get template and head template APIs.\n+ */\n public class RestGetIndexTemplateAction extends BaseRestHandler {\n- public RestGetIndexTemplateAction(Settings settings, RestController controller) {\n- super(settings);\n \n+ public RestGetIndexTemplateAction(final Settings settings, final RestController controller) {\n+ super(settings);\n controller.registerHandler(GET, \"/_template\", this);\n controller.registerHandler(GET, \"/_template/{name}\", this);\n+ controller.registerHandler(HEAD, \"/_template/{name}\", this);\n }\n \n @Override\n public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {\n final String[] names = Strings.splitStringByCommaToArray(request.param(\"name\"));\n \n- GetIndexTemplatesRequest getIndexTemplatesRequest = new GetIndexTemplatesRequest(names);\n+ final GetIndexTemplatesRequest getIndexTemplatesRequest = new GetIndexTemplatesRequest(names);\n getIndexTemplatesRequest.local(request.paramAsBoolean(\"local\", getIndexTemplatesRequest.local()));\n getIndexTemplatesRequest.masterNodeTimeout(request.paramAsTime(\"master_timeout\", getIndexTemplatesRequest.masterNodeTimeout()));\n \n@@ -60,9 +65,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC\n .indices()\n .getTemplates(getIndexTemplatesRequest, new RestToXContentListener<GetIndexTemplatesResponse>(channel) {\n @Override\n- protected RestStatus getStatus(GetIndexTemplatesResponse response) {\n- boolean templateExists = false == response.getIndexTemplates().isEmpty();\n-\n+ protected RestStatus getStatus(final 
GetIndexTemplatesResponse response) {\n+ final boolean templateExists = response.getIndexTemplates().isEmpty() == false;\n return (templateExists || implicitAll) ? OK : NOT_FOUND;\n }\n });", "filename": "core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java", "status": "modified" }, { "diff": "@@ -92,6 +92,24 @@ public void testAliasExists() throws IOException {\n }\n }\n \n+ public void testTemplateExists() throws IOException {\n+ try (XContentBuilder builder = jsonBuilder()) {\n+ builder.startObject();\n+ {\n+ builder.array(\"index_patterns\", \"*\");\n+ builder.startObject(\"settings\");\n+ {\n+ builder.field(\"number_of_replicas\", 0);\n+ }\n+ builder.endObject();\n+ }\n+ builder.endObject();\n+\n+ client().performRequest(\"PUT\", \"/_template/template\", emptyMap(), new StringEntity(builder.string()));\n+ headTestCase(\"/_template/template\", emptyMap(), greaterThan(0));\n+ }\n+ }\n+\n private void headTestCase(String url, Map<String, String> params, Matcher<Integer> matcher) throws IOException {\n Response response = client().performRequest(\"HEAD\", url, params);\n assertEquals(200, response.getStatusLine().getStatusCode());", "filename": "modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4HeadBodyIsEmptyIT.java", "status": "modified" } ] }
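The fix in this record follows the same pattern as the other HEAD-request PRs: drop the dedicated `RestHead*` handler, register the HEAD method on the existing GET handler, and let the REST layer strip the body while keeping the Content-Length it would have sent for GET. Below is a minimal sketch of that registration pattern, modeled on the `RestGetIndexTemplateAction` diff above; the `/_example/{name}` path and the response body are made up for illustration.

```java
import java.io.IOException;

import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestStatus;

import static org.elasticsearch.rest.RestRequest.Method.GET;
import static org.elasticsearch.rest.RestRequest.Method.HEAD;

/**
 * Illustrative handler, not part of the PR: the same instance is registered for GET and
 * HEAD on one path and always builds the full GET response, relying on the REST layer to
 * drop the body of HEAD responses while preserving the Content-Length header.
 */
public class RestExampleExistsAction extends BaseRestHandler {

    public RestExampleExistsAction(final Settings settings, final RestController controller) {
        super(settings);
        controller.registerHandler(GET, "/_example/{name}", this);
        controller.registerHandler(HEAD, "/_example/{name}", this);
    }

    @Override
    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
        final String name = request.param("name");
        // No special casing on request.method(): GET and HEAD produce the same response.
        return channel -> {
            try (XContentBuilder builder = channel.newBuilder()) {
                builder.startObject().field("name", name).endObject();
                channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder));
            }
        };
    }
}
```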
{ "body": "This query expects boolean queries composed of term queries. However with recent changes, it became likely to have `GraphQuery` objects in the query when synonyms are enabled. This causes `MatchQuery.boolToExtendedCommonTermsQuery` to ignore the frequency cutoff entirely.", "comments": [], "number": 23102, "title": "ExtendedCommonTermsQuery does not work with GraphQuery" }
{ "body": "GraphQueries are now generated as simple clauses in BooleanQuery. So for instance a multi terms synonym will generate\r\n a GraphQuery but only for the side paths, the other part of the query will not be impacted. This means that we cannot apply\r\n `minimum_should_match` or `cutoff_frequency` on GraphQuery anymore (only ES 5.3 does that because we generate all possible paths if a query has at least one multi terms synonym).\r\nStarting in 5.4 multi terms synonym will now be treated as a single term when `minimum_should_match` is computed and will be ignored when `cutoff_frequency` is set.\r\nFixes #23102", "number": 23117, "review_comments": [], "title": "Fix GraphQuery expectation after Lucene upgrade to 6.5" }
{ "commits": [ { "message": "Fix GraphQuery expectation after Lucene upgrade to 6.5\n\nGraphQueries are now generated as simple clauses in BooleanQuery. So for instance a multi terms synonym will generate\n a GraphQuery but only for the side paths, the other part of the query will not be impacted. This means that we cannot apply\n `minimum_should_match` or `cutoff_frequency` on GraphQuery anymore (only ES 5.3 does that because we generate all possible paths if a query has at least one multi terms synonym).\nStarting in 5.4 multi terms synonym will now be treated as a single term when `minimum_should_match` is computed and will be ignored when `cutoff_frequency` is set.\nFixes #23102" } ], "files": [ { "diff": "@@ -25,7 +25,6 @@\n import org.apache.lucene.search.BooleanClause.Occur;\n import org.apache.lucene.search.BooleanQuery;\n import org.apache.lucene.search.ConstantScoreQuery;\n-import org.apache.lucene.search.GraphQuery;\n import org.apache.lucene.search.MatchAllDocsQuery;\n import org.apache.lucene.search.MatchNoDocsQuery;\n import org.apache.lucene.search.PrefixQuery;\n@@ -149,18 +148,7 @@ public static Query maybeApplyMinimumShouldMatch(Query query, @Nullable String m\n return applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch);\n } else if (query instanceof ExtendedCommonTermsQuery) {\n ((ExtendedCommonTermsQuery)query).setLowFreqMinimumNumberShouldMatch(minimumShouldMatch);\n- } else if (query instanceof GraphQuery && ((GraphQuery) query).hasBoolean()) {\n- // we have a graph query that has at least one boolean sub-query\n- // re-build and set minimum should match value on all boolean queries\n- List<Query> oldQueries = ((GraphQuery) query).getQueries();\n- Query[] queries = new Query[oldQueries.size()];\n- for (int i = 0; i < queries.length; i++) {\n- queries[i] = maybeApplyMinimumShouldMatch(oldQueries.get(i), minimumShouldMatch);\n- }\n-\n- return new GraphQuery(queries);\n }\n-\n return query;\n }\n ", "filename": "core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java", "status": "modified" }, { "diff": "@@ -19,10 +19,7 @@\n \n package org.elasticsearch.index.query;\n \n-import org.apache.lucene.queries.ExtendedCommonTermsQuery;\n-import org.apache.lucene.search.BooleanQuery;\n import org.apache.lucene.search.FuzzyQuery;\n-import org.apache.lucene.search.GraphQuery;\n import org.apache.lucene.search.Query;\n import org.elasticsearch.common.ParseField;\n import org.elasticsearch.common.ParsingException;\n@@ -37,7 +34,6 @@\n import org.elasticsearch.index.search.MatchQuery.ZeroTermsQuery;\n \n import java.io.IOException;\n-import java.util.List;\n import java.util.Locale;\n import java.util.Objects;\n ", "filename": "core/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java", "status": "modified" }, { "diff": "@@ -20,8 +20,6 @@\n package org.elasticsearch.index.query;\n \n import org.apache.lucene.analysis.Analyzer;\n-import org.apache.lucene.search.BooleanQuery;\n-import org.apache.lucene.search.GraphQuery;\n import org.apache.lucene.search.Query;\n import org.elasticsearch.Version;\n import org.elasticsearch.common.ParseField;\n@@ -38,7 +36,6 @@\n \n import java.io.IOException;\n import java.util.HashMap;\n-import java.util.List;\n import java.util.Locale;\n import java.util.Map;\n import java.util.Objects;", "filename": "core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java", "status": "modified" }, { "diff": "@@ -375,23 +375,7 @@ public Query createCommonTermsQuery(String field, String queryText, Occur 
highFr\n if (booleanQuery != null && booleanQuery instanceof BooleanQuery) {\n BooleanQuery bq = (BooleanQuery) booleanQuery;\n return boolToExtendedCommonTermsQuery(bq, highFreqOccur, lowFreqOccur, maxTermFrequency, fieldType);\n- } else if (booleanQuery != null && booleanQuery instanceof GraphQuery && ((GraphQuery) booleanQuery).hasBoolean()) {\n- // we have a graph query that has at least one boolean sub-query\n- // re-build and use extended common terms\n- List<Query> oldQueries = ((GraphQuery) booleanQuery).getQueries();\n- Query[] queries = new Query[oldQueries.size()];\n- for (int i = 0; i < queries.length; i++) {\n- Query oldQuery = oldQueries.get(i);\n- if (oldQuery instanceof BooleanQuery) {\n- queries[i] = boolToExtendedCommonTermsQuery((BooleanQuery) oldQuery, highFreqOccur, lowFreqOccur, maxTermFrequency, fieldType);\n- } else {\n- queries[i] = oldQuery;\n- }\n- }\n-\n- return new GraphQuery(queries);\n }\n-\n return booleanQuery;\n }\n ", "filename": "core/src/main/java/org/elasticsearch/index/search/MatchQuery.java", "status": "modified" }, { "diff": "@@ -19,10 +19,6 @@\n \n package org.elasticsearch.index.search;\n \n-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;\n-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;\n-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;\n-\n import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;\n import org.elasticsearch.action.index.IndexRequestBuilder;\n import org.elasticsearch.action.search.SearchResponse;\n@@ -39,6 +35,10 @@\n import java.util.List;\n import java.util.concurrent.ExecutionException;\n \n+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;\n+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;\n+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;\n+\n public class MatchQueryIT extends ESIntegTestCase {\n private static final String INDEX = \"test\";\n \n@@ -172,7 +172,6 @@ public void testPhrasePrefix() throws ExecutionException, InterruptedException {\n assertSearchHits(searchResponse, \"1\", \"2\", \"3\", \"7\", \"8\");\n }\n \n- @AwaitsFix(bugUrl=\"https://github.com/elastic/elasticsearch/issues/23102\")\n public void testCommonTerms() throws ExecutionException, InterruptedException {\n String route = \"commonTermsTest\";\n List<IndexRequestBuilder> builders = getDocs();\n@@ -185,8 +184,8 @@ public void testCommonTerms() throws ExecutionException, InterruptedException {\n // do a search with no cutoff frequency to show which docs should match\n SearchResponse searchResponse = client().prepareSearch(INDEX)\n .setRouting(route)\n- .setQuery(QueryBuilders.matchQuery(\"field\", \"foo three happened\")\n- .operator(Operator.OR).analyzer(\"lower_graphsyns\")).get();\n+ .setQuery(QueryBuilders.matchQuery(\"field\", \"bar three happened\")\n+ .operator(Operator.OR)).get();\n \n assertHitCount(searchResponse, 4L);\n assertSearchHits(searchResponse, \"1\", \"2\", \"5\", \"6\");\n@@ -195,8 +194,8 @@ public void testCommonTerms() throws ExecutionException, InterruptedException {\n // in this case, essentially everything but \"happened\" gets excluded\n searchResponse = client().prepareSearch(INDEX)\n .setRouting(route)\n- .setQuery(QueryBuilders.matchQuery(\"field\", \"foo three happened\")\n- .operator(Operator.OR).analyzer(\"lower_graphsyns\").cutoffFrequency(1f)).get();\n+ 
.setQuery(QueryBuilders.matchQuery(\"field\", \"bar three happened\")\n+ .operator(Operator.OR).cutoffFrequency(1f)).get();\n \n assertHitCount(searchResponse, 1L);\n assertSearchHits(searchResponse, \"1\");", "filename": "core/src/test/java/org/elasticsearch/index/search/MatchQueryIT.java", "status": "modified" } ] }
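With the GraphQuery special-casing removed, `cutoff_frequency` is only folded into an `ExtendedCommonTermsQuery` when the analyzed text produces a plain `BooleanQuery`; graph clauses from multi-term synonyms are left untouched. The sketch below mirrors the query shape exercised by the updated `MatchQueryIT#testCommonTerms` test, using the client API that appears in the diff; the index and field names are placeholders.

```java
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.Operator;
import org.elasticsearch.index.query.QueryBuilders;

final class CommonTermsExample {

    // Sketch of the query shape used in MatchQueryIT#testCommonTerms: an OR match query
    // with a cutoff_frequency, which demotes terms above the cutoff to optional
    // high-frequency clauses instead of scoring them like rare terms.
    static SearchResponse searchWithCutoff(Client client, String index) {
        return client.prepareSearch(index)
                .setQuery(QueryBuilders.matchQuery("field", "bar three happened")
                        .operator(Operator.OR)
                        .cutoffFrequency(1f))
                .get();
    }
}
```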
{ "body": "After #21123 when Elasticsearch receive a HEAD request it returns the Content-Length of the that it would return for a GET request with an empty response body. Except in the document exists, index exists, and type exists requests which return 0. We should fix them to also return the Content-Length that would be in the response.\n", "comments": [ { "body": "I'm adding the v5.1.0 label too, I think we should target a fix there.\n", "created_at": "2016-10-26T05:16:19Z" }, { "body": "These are all addressed now. Closing.", "created_at": "2017-06-12T12:10:12Z" } ], "number": 21125, "title": "Some endpoints return Content-Length: 0 for HEAD requests" }
{ "body": "Index HEAD requests incorrectly return a content-length header of\r\n0. This commit addresses this by removing the special handling for index\r\nHEAD requests, and just relying on the general mechanism that exists for\r\nhandling HEAD requests in the REST layer.\r\n\r\nRelates #21125\r\n\r\n", "number": 23112, "review_comments": [], "title": "Fix index HEAD requests" }
{ "commits": [ { "message": "Fix index HEAD requests\n\nIndex HEAD requests incorrectly return a content-length header of\n0. This commit addresses this by removing the special handling for index\nHEAD requests, and just relying on the general mechanism that exists for\nhandling HEAD requests in the REST layer." }, { "message": "Merge branch 'master' into fix-index-head\n\n* master:\n Fix alias HEAD requests\n Upgrade to lucene-6.5.0-snapshot-f919485. (#23087)\n Add BulkProcessor methods with XContentType parameter (#23078)" } ], "files": [ { "diff": "@@ -252,7 +252,6 @@\n import org.elasticsearch.rest.action.admin.indices.RestIndexDeleteAliasesAction;\n import org.elasticsearch.rest.action.admin.indices.RestIndexPutAliasAction;\n import org.elasticsearch.rest.action.admin.indices.RestIndicesAliasesAction;\n-import org.elasticsearch.rest.action.admin.indices.RestIndicesExistsAction;\n import org.elasticsearch.rest.action.admin.indices.RestIndicesSegmentsAction;\n import org.elasticsearch.rest.action.admin.indices.RestIndicesShardStoresAction;\n import org.elasticsearch.rest.action.admin.indices.RestIndicesStatsAction;\n@@ -527,7 +526,6 @@ public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster) {\n registerHandler.accept(new RestDeleteSnapshotAction(settings, restController));\n registerHandler.accept(new RestSnapshotsStatusAction(settings, restController));\n \n- registerHandler.accept(new RestIndicesExistsAction(settings, restController));\n registerHandler.accept(new RestTypesExistsAction(settings, restController));\n registerHandler.accept(new RestGetIndicesAction(settings, restController, indexScopedSettings, settingsFilter));\n registerHandler.accept(new RestIndicesStatsAction(settings, restController));", "filename": "core/src/main/java/org/elasticsearch/action/ActionModule.java", "status": "modified" }, { "diff": "@@ -46,6 +46,7 @@\n import java.util.Set;\n \n import static org.elasticsearch.rest.RestRequest.Method.GET;\n+import static org.elasticsearch.rest.RestRequest.Method.HEAD;\n import static org.elasticsearch.rest.RestStatus.OK;\n \n public class RestGetIndicesAction extends BaseRestHandler {\n@@ -58,6 +59,7 @@ public RestGetIndicesAction(Settings settings, RestController controller, IndexS\n super(settings);\n this.indexScopedSettings = indexScopedSettings;\n controller.registerHandler(GET, \"/{index}\", this);\n+ controller.registerHandler(HEAD, \"/{index}\", this);\n controller.registerHandler(GET, \"/{index}/{type}\", this);\n this.settingsFilter = settingsFilter;\n }", "filename": "core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java", "status": "modified" }, { "diff": "@@ -54,8 +54,8 @@ public void testDocumentExists() throws IOException {\n \n public void testIndexExists() throws IOException {\n createTestDoc();\n- headTestCase(\"test\", emptyMap(), equalTo(0));\n- headTestCase(\"test\", singletonMap(\"pretty\", \"true\"), equalTo(0));\n+ headTestCase(\"test\", emptyMap(), greaterThan(0));\n+ headTestCase(\"test\", singletonMap(\"pretty\", \"true\"), greaterThan(0));\n }\n \n public void testTypeExists() throws IOException {", "filename": "modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4HeadBodyIsEmptyIT.java", "status": "modified" } ] }
{ "body": "After #21123 when Elasticsearch receive a HEAD request it returns the Content-Length of the that it would return for a GET request with an empty response body. Except in the document exists, index exists, and type exists requests which return 0. We should fix them to also return the Content-Length that would be in the response.\n", "comments": [ { "body": "I'm adding the v5.1.0 label too, I think we should target a fix there.\n", "created_at": "2016-10-26T05:16:19Z" }, { "body": "These are all addressed now. Closing.", "created_at": "2017-06-12T12:10:12Z" } ], "number": 21125, "title": "Some endpoints return Content-Length: 0 for HEAD requests" }
{ "body": "Alias HEAD requests incorrectly return a content-length header of 0. This commit addresses this by removing the special handling for alias HEAD requests, and just relying on the general mechanism that exists for handling HEAD requests in the REST layer.\r\n\r\nRelates #21125", "number": 23094, "review_comments": [ { "body": "aren't we missing this one endpoint in RestGetAliasesAction?", "created_at": "2017-02-10T08:40:29Z" }, { "body": "never thought about using curly brackets to help the indentation when writing json through XContentBuilder. Smart!", "created_at": "2017-02-10T08:42:28Z" }, { "body": "I don't think this endpoint does anything, wouldn't it always 404 or am I missing something?", "created_at": "2017-02-10T13:32:08Z" }, { "body": "you are right. I have no idea why we had that. makes no sense. ", "created_at": "2017-02-10T14:12:46Z" } ], "title": "Fix alias HEAD requests" }
{ "commits": [ { "message": "Fix alias HEAD requests\n\nAlias HEAD requests incorrectly return a content-length header of\n0. This commit addresses this by removing the special handling for alias\nHEAD requests, and just relying on the general mechanism that exists for\nhandling HEAD requests in the REST layer." }, { "message": "Merge branch 'master' into fix-aliases-head\n\n* master:\n Cleanup RestGetAliasesAction.java\n Use `typed_keys` parameter to prefix suggester names by type in search responses (#23080)\n [Docs] Remove unnecessary // TEST[continued] in search-template doc" } ], "files": [ { "diff": "@@ -234,7 +234,6 @@\n import org.elasticsearch.rest.action.admin.cluster.RestRestoreSnapshotAction;\n import org.elasticsearch.rest.action.admin.cluster.RestSnapshotsStatusAction;\n import org.elasticsearch.rest.action.admin.cluster.RestVerifyRepositoryAction;\n-import org.elasticsearch.rest.action.admin.indices.RestAliasesExistAction;\n import org.elasticsearch.rest.action.admin.indices.RestAnalyzeAction;\n import org.elasticsearch.rest.action.admin.indices.RestClearIndicesCacheAction;\n import org.elasticsearch.rest.action.admin.indices.RestCloseIndexAction;\n@@ -535,7 +534,6 @@ public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster) {\n registerHandler.accept(new RestIndicesSegmentsAction(settings, restController));\n registerHandler.accept(new RestIndicesShardStoresAction(settings, restController));\n registerHandler.accept(new RestGetAliasesAction(settings, restController));\n- registerHandler.accept(new RestAliasesExistAction(settings, restController));\n registerHandler.accept(new RestIndexDeleteAliasesAction(settings, restController));\n registerHandler.accept(new RestIndexPutAliasAction(settings, restController));\n registerHandler.accept(new RestIndicesAliasesAction(settings, restController));", "filename": "core/src/main/java/org/elasticsearch/action/ActionModule.java", "status": "modified" }, { "diff": "@@ -44,6 +44,7 @@\n import java.util.stream.Collectors;\n \n import static org.elasticsearch.rest.RestRequest.Method.GET;\n+import static org.elasticsearch.rest.RestRequest.Method.HEAD;\n import static org.elasticsearch.rest.RestStatus.OK;\n \n /**\n@@ -54,7 +55,9 @@ public class RestGetAliasesAction extends BaseRestHandler {\n public RestGetAliasesAction(final Settings settings, final RestController controller) {\n super(settings);\n controller.registerHandler(GET, \"/_alias/{name}\", this);\n+ controller.registerHandler(HEAD, \"/_alias/{name}\", this);\n controller.registerHandler(GET, \"/{index}/_alias/{name}\", this);\n+ controller.registerHandler(HEAD, \"/{index}/_alias/{name}\", this);\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java", "status": "modified" }, { "diff": "@@ -21,6 +21,7 @@\n \n import org.apache.http.entity.StringEntity;\n import org.elasticsearch.client.Response;\n+import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.test.rest.ESRestTestCase;\n import org.hamcrest.Matcher;\n \n@@ -29,6 +30,7 @@\n \n import static java.util.Collections.emptyMap;\n import static java.util.Collections.singletonMap;\n+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.greaterThan;\n \n@@ -62,10 +64,39 @@ public void testTypeExists() throws IOException {\n headTestCase(\"test/test\", singletonMap(\"pretty\", \"true\"), equalTo(0));\n }\n \n+ public 
void testAliasExists() throws IOException {\n+ createTestDoc();\n+ try (XContentBuilder builder = jsonBuilder()) {\n+ builder.startObject();\n+ {\n+ builder.startArray(\"actions\");\n+ {\n+ builder.startObject();\n+ {\n+ builder.startObject(\"add\");\n+ {\n+ builder.field(\"index\", \"test\");\n+ builder.field(\"alias\", \"test_alias\");\n+ }\n+ builder.endObject();\n+ }\n+ builder.endObject();\n+ }\n+ builder.endArray();\n+ }\n+ builder.endObject();\n+\n+ client().performRequest(\"POST\", \"_aliases\", emptyMap(), new StringEntity(builder.string()));\n+ headTestCase(\"/_alias/test_alias\", emptyMap(), greaterThan(0));\n+ headTestCase(\"/test/_alias/test_alias\", emptyMap(), greaterThan(0));\n+ }\n+ }\n+\n private void headTestCase(String url, Map<String, String> params, Matcher<Integer> matcher) throws IOException {\n Response response = client().performRequest(\"HEAD\", url, params);\n assertEquals(200, response.getStatusLine().getStatusCode());\n assertThat(Integer.valueOf(response.getHeader(\"Content-Length\")), matcher);\n assertNull(\"HEAD requests shouldn't have a response body but \" + url + \" did\", response.getEntity());\n }\n+\n }", "filename": "modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4HeadBodyIsEmptyIT.java", "status": "modified" } ] }
{ "body": "We use ExpandCollapseSearchResponseListener for field collapsing but this listener executes heavy operations in a blocking fashion. This could happen on a transport thread. We should do this in an async way.\r\n\r\nI just spoke with @jimczi how to fix it and he is taking it over. For now I will put a fix into 5.x and master since this triggers quite often due to a recent change.\r\n\r\nNote: this code is unreleased", "comments": [], "number": 23048, "title": "ExpandCollapseSearchResponseListener potentially executes blocking calls on a network thread" }
{ "body": "This changes removes the SearchResponseListener that was used by the ExpandCollapseSearchResponseListener to expand collapsed hits.\r\nInstead the expand calls are done directly in the SearchPhaseController in a asynchronous fashion.\r\nThe removal of SearchResponseListener is not a breaking change because it was never release.\r\nThis functionnality is not very useful as is and should be rethink during the great search refactoring that Simon is doing.\r\n\r\nCloses #23048", "number": 23053, "review_comments": [ { "body": "hmm any chance we can add a method to `SearchTransportService` that fires off a multisearch via transport service directly? I'd love to not go through a client here?", "created_at": "2017-02-09T14:45:56Z" }, { "body": "Right, we don't need the client here.\r\nDoes https://github.com/elastic/elasticsearch/pull/23053/commits/2c34484c607926d39ec6f0152e84f6185eaa7fed works for you ?", "created_at": "2017-02-09T15:14:49Z" } ], "title": "Replace blocking calls in ExpandCollapseSearchResponseListener by asynchronous requests" }
{ "commits": [ { "message": "Replace blocking calls in ExpandCollapseSearchResponseListener by asynchronous requests\n\nThis changes removes the SearchResponseListener that was used by the ExpandCollapseSearchResponseListener to expand collapsed hits.\nInstead the expand calls are done directly in the SearchPhaseController in a asynchronous fashion.\nThe removal of SearchResponseListener is not a breaking change because it was never release.\nThis functionnality is not very useful as is and should be rethink during the great search refactoring that Simon is doing.\n\nCloses #23048" }, { "message": "Revert \"Fork off a search thread before sending back fetched response\" now that expand sends non blocking requests" }, { "message": "Fix latch count down" }, { "message": "Execute asynchronous expand group query sequentially" }, { "message": "line lenght" }, { "message": "Adds a request parameter named max_concurrent_group_searches\n\nThe `max_concurrent_group_searches` request parameter can be used to control\nthe maximum number of concurrent searches allowed to retrieve the inner_hits per collapsed hit.\nSo instead of sending all group requests in parallel (or in sequence) this change uses a `multi_search` request\n that uses `max_concurrent_group_searches` to determine the number of concurrent searches allowed." }, { "message": "Address review" } ], "files": [ { "diff": "@@ -559,25 +559,13 @@ private void releaseIrrelevantSearchContext(QuerySearchResult queryResult) {\n private void sendResponse(SearchPhaseController searchPhaseController, ScoreDoc[] sortedDocs,\n String scrollId, SearchPhaseController.ReducedQueryPhase reducedQueryPhase,\n AtomicArray<? extends QuerySearchResultProvider> fetchResultsArr) {\n- // this is only a temporary fix since field collapsing executes a blocking call on response\n- // which could be a network thread. 
we are fixing this but for now we just fork off again.\n- // this should be removed once https://github.com/elastic/elasticsearch/issues/23048 is fixed\n- getExecutor().execute(new ActionRunnable<SearchResponse>(listener) {\n- @Override\n- public void doRun() throws IOException {\n- final boolean isScrollRequest = request.scroll() != null;\n- final InternalSearchResponse internalResponse = searchPhaseController.merge(isScrollRequest, sortedDocs,\n- reducedQueryPhase, fetchResultsArr);\n- listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),\n- buildTookInMillis(), buildShardFailures()));\n- }\n-\n- @Override\n- public void onFailure(Exception e) {\n- raisePhaseFailure(new ReduceSearchPhaseException(\"fetch\", \"\", e, buildShardFailures()));\n- }\n- });\n+ final boolean isScrollRequest = request.scroll() != null;\n+ final InternalSearchResponse internalResponse = searchPhaseController.merge(isScrollRequest, sortedDocs, reducedQueryPhase,\n+ fetchResultsArr);\n+ listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),\n+ buildTookInMillis(), buildShardFailures()));\n }\n-\n }\n+\n+\n }", "filename": "core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java", "status": "modified" }, { "diff": "@@ -38,15 +38,15 @@\n import org.elasticsearch.common.util.BigArrays;\n import org.elasticsearch.common.util.concurrent.AtomicArray;\n import org.elasticsearch.script.ScriptService;\n+import org.elasticsearch.search.SearchHit;\n+import org.elasticsearch.search.SearchHits;\n import org.elasticsearch.search.aggregations.InternalAggregation;\n import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;\n import org.elasticsearch.search.aggregations.InternalAggregations;\n import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;\n import org.elasticsearch.search.dfs.AggregatedDfs;\n import org.elasticsearch.search.dfs.DfsSearchResult;\n import org.elasticsearch.search.fetch.FetchSearchResult;\n-import org.elasticsearch.search.SearchHit;\n-import org.elasticsearch.search.SearchHits;\n import org.elasticsearch.search.internal.InternalSearchResponse;\n import org.elasticsearch.search.profile.ProfileShardResult;\n import org.elasticsearch.search.profile.SearchProfileShardResults;\n@@ -65,7 +65,6 @@\n import java.util.HashMap;\n import java.util.List;\n import java.util.Map;\n-import java.util.function.BiConsumer;\n import java.util.stream.Collectors;\n import java.util.stream.StreamSupport;\n \n@@ -83,25 +82,11 @@ public class SearchPhaseController extends AbstractComponent {\n \n private final BigArrays bigArrays;\n private final ScriptService scriptService;\n- private final List<BiConsumer<SearchRequest, SearchResponse> > searchResponseListener;\n \n public SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService) {\n- this(settings, bigArrays, scriptService, Collections.emptyList());\n- }\n-\n- public SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService,\n- List<BiConsumer<SearchRequest, SearchResponse> > searchResponseListener) {\n super(settings);\n this.bigArrays = bigArrays;\n this.scriptService = scriptService;\n- this.searchResponseListener = searchResponseListener;\n- }\n-\n- /**\n- * Returns the search response listeners registry\n- */\n- public List<BiConsumer<SearchRequest, SearchResponse> > getSearchResponseListener() {\n- return 
searchResponseListener;\n }\n \n public AggregatedDfs aggregateDfs(AtomicArray<DfsSearchResult> results) {", "filename": "core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java", "status": "modified" }, { "diff": "@@ -173,6 +173,15 @@ private void sendExecuteFetch(Transport.Connection connection, String action, fi\n new ActionListenerResponseHandler<>(listener, FetchSearchResult::new));\n }\n \n+ /**\n+ * Used by {@link TransportSearchAction} to send the expand queries (field collapsing).\n+ */\n+ void sendExecuteMultiSearch(DiscoveryNode node, final MultiSearchRequest request, SearchTask task,\n+ final ActionListener<MultiSearchResponse> listener) {\n+ transportService.sendChildRequest(transportService.getConnection(node), MultiSearchAction.NAME, request, task,\n+ new ActionListenerResponseHandler<>(listener, MultiSearchResponse::new));\n+ }\n+\n public RemoteClusterService getRemoteClusterService() {\n return remoteClusterService;\n }", "filename": "core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java", "status": "modified" }, { "diff": "@@ -36,8 +36,15 @@\n import org.elasticsearch.common.settings.Setting.Property;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.index.Index;\n+import org.elasticsearch.index.query.BoolQueryBuilder;\n+import org.elasticsearch.index.query.InnerHitBuilder;\n+import org.elasticsearch.index.query.QueryBuilder;\n+import org.elasticsearch.index.query.QueryBuilders;\n+import org.elasticsearch.search.SearchHit;\n+import org.elasticsearch.search.SearchHits;\n import org.elasticsearch.search.SearchService;\n import org.elasticsearch.search.builder.SearchSourceBuilder;\n+import org.elasticsearch.search.collapse.CollapseBuilder;\n import org.elasticsearch.search.internal.AliasFilter;\n import org.elasticsearch.tasks.Task;\n import org.elasticsearch.threadpool.ThreadPool;\n@@ -47,11 +54,11 @@\n import java.util.ArrayList;\n import java.util.Collections;\n import java.util.HashMap;\n+import java.util.Iterator;\n import java.util.List;\n import java.util.Map;\n import java.util.Set;\n import java.util.concurrent.Executor;\n-import java.util.function.BiConsumer;\n import java.util.function.Function;\n \n import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH;\n@@ -212,16 +219,18 @@ private void executeSearch(SearchTask task, long startTimeInMillis, SearchReques\n return connection;\n };\n \n+ // Only enrich the search response iff collapsing has been specified:\n final ActionListener<SearchResponse> wrapper;\n- if (searchPhaseController.getSearchResponseListener().size() > 0) {\n+ if (searchRequest.source() != null &&\n+ searchRequest.source().collapse() != null &&\n+ searchRequest.source().collapse().getInnerHit() != null) {\n+\n wrapper = ActionListener.wrap(searchResponse -> {\n- List<BiConsumer<SearchRequest, SearchResponse>> responseListeners =\n- searchPhaseController.getSearchResponseListener();\n- for (BiConsumer<SearchRequest, SearchResponse> respListener : responseListeners) {\n- respListener.accept(searchRequest, searchResponse);\n+ if (searchResponse.getHits().getHits().length == 0) {\n+ listener.onResponse(searchResponse);\n+ } else {\n+ expandCollapsedHits(nodes.getLocalNode(), task, searchRequest, searchResponse, listener);\n }\n- listener.onResponse(searchResponse);\n-\n }, listener::onFailure);\n } else {\n wrapper = listener;\n@@ -284,4 +293,90 @@ private static void failIfOverShardCountLimit(ClusterService clusterService, int\n + \"] to a greater value 
if you really want to query that many shards at the same time.\");\n }\n }\n+\n+ /**\n+ * Expands collapsed using the {@link CollapseBuilder#innerHit} options.\n+ */\n+ void expandCollapsedHits(DiscoveryNode node,\n+ SearchTask parentTask,\n+ SearchRequest searchRequest,\n+ SearchResponse searchResponse,\n+ ActionListener<SearchResponse> finalListener) {\n+ CollapseBuilder collapseBuilder = searchRequest.source().collapse();\n+ MultiSearchRequest multiRequest = new MultiSearchRequest();\n+ if (collapseBuilder.getMaxConcurrentGroupRequests() > 0) {\n+ multiRequest.maxConcurrentSearchRequests(collapseBuilder.getMaxConcurrentGroupRequests());\n+ }\n+ for (SearchHit hit : searchResponse.getHits()) {\n+ BoolQueryBuilder groupQuery = new BoolQueryBuilder();\n+ Object collapseValue = hit.field(collapseBuilder.getField()).getValue();\n+ if (collapseValue != null) {\n+ groupQuery.filter(QueryBuilders.matchQuery(collapseBuilder.getField(), collapseValue));\n+ } else {\n+ groupQuery.mustNot(QueryBuilders.existsQuery(collapseBuilder.getField()));\n+ }\n+ QueryBuilder origQuery = searchRequest.source().query();\n+ if (origQuery != null) {\n+ groupQuery.must(origQuery);\n+ }\n+ SearchSourceBuilder sourceBuilder = buildExpandSearchSourceBuilder(collapseBuilder.getInnerHit())\n+ .query(groupQuery);\n+ SearchRequest groupRequest = new SearchRequest(searchRequest.indices())\n+ .types(searchRequest.types())\n+ .source(sourceBuilder);\n+ multiRequest.add(groupRequest);\n+ }\n+ searchTransportService.sendExecuteMultiSearch(node, multiRequest, parentTask,\n+ ActionListener.wrap(response -> {\n+ Iterator<MultiSearchResponse.Item> it = response.iterator();\n+ for (SearchHit hit : searchResponse.getHits()) {\n+ MultiSearchResponse.Item item = it.next();\n+ if (item.isFailure()) {\n+ finalListener.onFailure(item.getFailure());\n+ return;\n+ }\n+ SearchHits innerHits = item.getResponse().getHits();\n+ if (hit.getInnerHits() == null) {\n+ hit.setInnerHits(new HashMap<>(1));\n+ }\n+ hit.getInnerHits().put(collapseBuilder.getInnerHit().getName(), innerHits);\n+ }\n+ finalListener.onResponse(searchResponse);\n+ }, finalListener::onFailure)\n+ );\n+ }\n+\n+ private SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder options) {\n+ SearchSourceBuilder groupSource = new SearchSourceBuilder();\n+ groupSource.from(options.getFrom());\n+ groupSource.size(options.getSize());\n+ if (options.getSorts() != null) {\n+ options.getSorts().forEach(groupSource::sort);\n+ }\n+ if (options.getFetchSourceContext() != null) {\n+ if (options.getFetchSourceContext().includes() == null && options.getFetchSourceContext().excludes() == null) {\n+ groupSource.fetchSource(options.getFetchSourceContext().fetchSource());\n+ } else {\n+ groupSource.fetchSource(options.getFetchSourceContext().includes(),\n+ options.getFetchSourceContext().excludes());\n+ }\n+ }\n+ if (options.getDocValueFields() != null) {\n+ options.getDocValueFields().forEach(groupSource::docValueField);\n+ }\n+ if (options.getStoredFieldsContext() != null && options.getStoredFieldsContext().fieldNames() != null) {\n+ options.getStoredFieldsContext().fieldNames().forEach(groupSource::storedField);\n+ }\n+ if (options.getScriptFields() != null) {\n+ for (SearchSourceBuilder.ScriptField field : options.getScriptFields()) {\n+ groupSource.scriptField(field.fieldName(), field.script());\n+ }\n+ }\n+ if (options.getHighlightBuilder() != null) {\n+ groupSource.highlighter(options.getHighlightBuilder());\n+ }\n+ groupSource.explain(options.isExplain());\n+ 
groupSource.trackScores(options.isTrackScores());\n+ return groupSource;\n+ }\n }", "filename": "core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java", "status": "modified" }, { "diff": "@@ -350,7 +350,7 @@ protected Node(final Environment environment, Collection<Class<? extends Plugin>\n IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class));\n modules.add(indicesModule);\n \n- SearchModule searchModule = new SearchModule(settings, false, pluginsService.filterPlugins(SearchPlugin.class), client);\n+ SearchModule searchModule = new SearchModule(settings, false, pluginsService.filterPlugins(SearchPlugin.class));\n CircuitBreakerService circuitBreakerService = createCircuitBreakerService(settingsModule.getSettings(),\n settingsModule.getClusterSettings());\n resourcesToClose.add(circuitBreakerService);\n@@ -451,7 +451,7 @@ protected Node(final Environment environment, Collection<Class<? extends Plugin>\n b.bind(SearchTransportService.class).toInstance(new SearchTransportService(settings,\n settingsModule.getClusterSettings(), transportService));\n b.bind(SearchPhaseController.class).toInstance(new SearchPhaseController(settings, bigArrays,\n- scriptModule.getScriptService(), searchModule.getSearchResponseListeners()));\n+ scriptModule.getScriptService()));\n b.bind(Transport.class).toInstance(transport);\n b.bind(TransportService.class).toInstance(transportService);\n b.bind(NetworkService.class).toInstance(networkService);", "filename": "core/src/main/java/org/elasticsearch/node/Node.java", "status": "modified" }, { "diff": "@@ -22,7 +22,6 @@\n import org.apache.lucene.search.BooleanQuery;\n import org.elasticsearch.action.search.SearchRequest;\n import org.elasticsearch.action.search.SearchResponse;\n-import org.elasticsearch.client.Client;\n import org.elasticsearch.common.NamedRegistry;\n import org.elasticsearch.common.geo.ShapesAvailability;\n import org.elasticsearch.common.geo.builders.ShapeBuilders;\n@@ -223,7 +222,6 @@\n import org.elasticsearch.search.aggregations.pipeline.movavg.models.SimpleModel;\n import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregationBuilder;\n import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregator;\n-import org.elasticsearch.search.collapse.ExpandCollapseSearchResponseListener;\n import org.elasticsearch.search.fetch.FetchPhase;\n import org.elasticsearch.search.fetch.FetchSubPhase;\n import org.elasticsearch.search.fetch.subphase.DocValueFieldsFetchSubPhase;\n@@ -280,17 +278,12 @@ public class SearchModule {\n \"moving_avg_model\");\n \n private final List<FetchSubPhase> fetchSubPhases = new ArrayList<>();\n- private final List<BiConsumer<SearchRequest, SearchResponse> > searchResponseListeners = new ArrayList<> ();\n \n private final Settings settings;\n private final List<NamedWriteableRegistry.Entry> namedWriteables = new ArrayList<>();\n private final List<NamedXContentRegistry.Entry> namedXContents = new ArrayList<>();\n \n public SearchModule(Settings settings, boolean transportClient, List<SearchPlugin> plugins) {\n- this(settings, transportClient, plugins, null);\n- }\n-\n- public SearchModule(Settings settings, boolean transportClient, List<SearchPlugin> plugins, Client client) {\n this.settings = settings;\n this.transportClient = transportClient;\n registerSuggesters(plugins);\n@@ -306,9 +299,6 @@ public SearchModule(Settings settings, boolean transportClient, List<SearchPlugi\n 
registerPipelineAggregations(plugins);\n registerFetchSubPhases(plugins);\n registerSearchExts(plugins);\n- if (false == transportClient) {\n- registerSearchResponseListeners(client, plugins);\n- }\n registerShapes();\n }\n \n@@ -341,13 +331,6 @@ public ParseFieldRegistry<MovAvgModel.AbstractModelParser> getMovingAverageModel\n return movingAverageModelParserRegistry;\n }\n \n- /**\n- * Returns the search response listeners registry\n- */\n- public List<BiConsumer<SearchRequest, SearchResponse> > getSearchResponseListeners() {\n- return searchResponseListeners;\n- }\n-\n private void registerAggregations(List<SearchPlugin> plugins) {\n registerAggregation(new AggregationSpec(AvgAggregationBuilder.NAME, AvgAggregationBuilder::new, AvgAggregationBuilder::parse)\n .addResultReader(InternalAvg::new));\n@@ -699,13 +682,6 @@ private void registerFetchSubPhases(List<SearchPlugin> plugins) {\n registerFromPlugin(plugins, p -> p.getFetchSubPhases(context), this::registerFetchSubPhase);\n }\n \n- private void registerSearchResponseListeners(Client client, List<SearchPlugin> plugins) {\n- if (client != null) {\n- registerSearchResponseListener(new ExpandCollapseSearchResponseListener(client));\n- }\n- registerFromPlugin(plugins, p -> p.getSearchResponseListeners(), this::registerSearchResponseListener);\n- }\n-\n private void registerSearchExts(List<SearchPlugin> plugins) {\n registerFromPlugin(plugins, SearchPlugin::getSearchExts, this::registerSearchExt);\n }\n@@ -791,10 +767,6 @@ private void registerQuery(QuerySpec<?> spec) {\n (p, c) -> spec.getParser().fromXContent((QueryParseContext) c)));\n }\n \n- private void registerSearchResponseListener(BiConsumer<SearchRequest, SearchResponse> listener) {\n- searchResponseListeners.add(requireNonNull(listener, \"SearchResponseListener must not be null\"));\n- }\n-\n public FetchPhase getFetchPhase() {\n return new FetchPhase(fetchSubPhases);\n }", "filename": "core/src/main/java/org/elasticsearch/search/SearchModule.java", "status": "modified" }, { "diff": "@@ -45,17 +45,20 @@\n public class CollapseBuilder extends ToXContentToBytes implements Writeable {\n public static final ParseField FIELD_FIELD = new ParseField(\"field\");\n public static final ParseField INNER_HITS_FIELD = new ParseField(\"inner_hits\");\n+ public static final ParseField MAX_CONCURRENT_GROUP_REQUESTS_FIELD = new ParseField(\"max_concurrent_group_searches\");\n private static final ObjectParser<CollapseBuilder, QueryParseContext> PARSER =\n new ObjectParser<>(\"collapse\", CollapseBuilder::new);\n \n static {\n PARSER.declareString(CollapseBuilder::setField, FIELD_FIELD);\n+ PARSER.declareInt(CollapseBuilder::setMaxConcurrentGroupRequests, MAX_CONCURRENT_GROUP_REQUESTS_FIELD);\n PARSER.declareObject(CollapseBuilder::setInnerHits,\n (p, c) -> InnerHitBuilder.fromXContent(c), INNER_HITS_FIELD);\n }\n \n private String field;\n private InnerHitBuilder innerHit;\n+ private int maxConcurrentGroupRequests = 0;\n \n private CollapseBuilder() {}\n \n@@ -70,12 +73,14 @@ public CollapseBuilder(String field) {\n \n public CollapseBuilder(StreamInput in) throws IOException {\n this.field = in.readString();\n+ this.maxConcurrentGroupRequests = in.readVInt();\n this.innerHit = in.readOptionalWriteable(InnerHitBuilder::new);\n }\n \n @Override\n public void writeTo(StreamOutput out) throws IOException {\n out.writeString(field);\n+ out.writeVInt(maxConcurrentGroupRequests);\n out.writeOptionalWriteable(innerHit);\n }\n \n@@ -84,6 +89,7 @@ public static CollapseBuilder 
fromXContent(QueryParseContext context) throws IOE\n return builder;\n }\n \n+ // for object parser only\n private CollapseBuilder setField(String field) {\n if (Strings.isEmpty(field)) {\n throw new IllegalArgumentException(\"field name is null or empty\");\n@@ -97,6 +103,14 @@ public CollapseBuilder setInnerHits(InnerHitBuilder innerHit) {\n return this;\n }\n \n+ public CollapseBuilder setMaxConcurrentGroupRequests(int num) {\n+ if (num < 1) {\n+ throw new IllegalArgumentException(\"maxConcurrentGroupRequests` must be positive\");\n+ }\n+ this.maxConcurrentGroupRequests = num;\n+ return this;\n+ }\n+\n /**\n * The name of the field to collapse against\n */\n@@ -111,6 +125,13 @@ public InnerHitBuilder getInnerHit() {\n return this.innerHit;\n }\n \n+ /**\n+ * Returns the amount of group requests that are allowed to be ran concurrently in the inner_hits phase.\n+ */\n+ public int getMaxConcurrentGroupRequests() {\n+ return maxConcurrentGroupRequests;\n+ }\n+\n @Override\n public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {\n builder.startObject();\n@@ -121,6 +142,9 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par\n \n private void innerToXContent(XContentBuilder builder) throws IOException {\n builder.field(FIELD_FIELD.getPreferredName(), field);\n+ if (maxConcurrentGroupRequests > 0) {\n+ builder.field(MAX_CONCURRENT_GROUP_REQUESTS_FIELD.getPreferredName(), maxConcurrentGroupRequests);\n+ }\n if (innerHit != null) {\n builder.field(INNER_HITS_FIELD.getPreferredName(), innerHit);\n }\n@@ -133,13 +157,18 @@ public boolean equals(Object o) {\n \n CollapseBuilder that = (CollapseBuilder) o;\n \n- if (field != null ? !field.equals(that.field) : that.field != null) return false;\n+ if (maxConcurrentGroupRequests != that.maxConcurrentGroupRequests) return false;\n+ if (!field.equals(that.field)) return false;\n return innerHit != null ? innerHit.equals(that.innerHit) : that.innerHit == null;\n+\n }\n \n @Override\n public int hashCode() {\n- return Objects.hash(this.field, this.innerHit);\n+ int result = field.hashCode();\n+ result = 31 * result + (innerHit != null ? 
innerHit.hashCode() : 0);\n+ result = 31 * result + maxConcurrentGroupRequests;\n+ return result;\n }\n \n public CollapseContext build(SearchContext context) {", "filename": "core/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java", "status": "modified" }, { "diff": "@@ -18,8 +18,6 @@\n */\n package org.elasticsearch.search;\n \n-import org.elasticsearch.action.search.SearchRequest;\n-import org.elasticsearch.action.search.SearchResponse;\n import org.elasticsearch.common.inject.ModuleTestCase;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n@@ -71,7 +69,6 @@\n import java.util.List;\n import java.util.Map;\n import java.util.Set;\n-import java.util.function.BiConsumer;\n \n import static java.util.Collections.emptyList;\n import static java.util.Collections.singletonList;\n@@ -259,18 +256,6 @@ public List<PipelineAggregationSpec> getPipelineAggregations() {\n hasSize(1));\n }\n \n- public void testRegisterSearchResponseListener() {\n- BiConsumer<SearchRequest, SearchResponse> listener = (s, r) -> {};\n- SearchModule module = new SearchModule(Settings.EMPTY, false, singletonList(new SearchPlugin() {\n- public List<BiConsumer<SearchRequest, SearchResponse>> getSearchResponseListeners() {\n- return singletonList(listener);\n- }\n- }));\n- List<BiConsumer<SearchRequest, SearchResponse>> listeners = module.getSearchResponseListeners();\n- assertEquals(listeners.size(), 1);\n- assertEquals(listeners.get(0), listener);\n- }\n-\n private static final String[] NON_DEPRECATED_QUERIES = new String[] {\n \"bool\",\n \"boosting\",", "filename": "core/src/test/java/org/elasticsearch/search/SearchModuleTests.java", "status": "modified" }, { "diff": "@@ -68,6 +68,7 @@ public static void afterClass() throws Exception {\n \n public static CollapseBuilder randomCollapseBuilder() {\n CollapseBuilder builder = new CollapseBuilder(randomAsciiOfLength(10));\n+ builder.setMaxConcurrentGroupRequests(randomIntBetween(1, 48));\n if (randomBoolean()) {\n InnerHitBuilder innerHit = InnerHitBuilderTests.randomInnerHits(false, false);\n builder.setInnerHits(innerHit);", "filename": "core/src/test/java/org/elasticsearch/search/collapse/CollapseBuilderTests.java", "status": "modified" }, { "diff": "@@ -54,7 +54,8 @@ GET /twitter/tweet/_search\n \"name\": \"last_tweets\", <2>\n \"size\": 5, <3>\n \"sort\": [{ \"date\": \"asc\" }] <4>\n- }\n+ },\n+ \"max_concurrent_group_searches\": 4 <5>\n },\n \"sort\": [\"likes\"]\n }\n@@ -65,8 +66,15 @@ GET /twitter/tweet/_search\n <2> the name used for the inner hit section in the response\n <3> the number of inner_hits to retrieve per collapse key\n <4> how to sort the document inside each group\n+<5> the number of concurrent requests allowed to retrieve the inner_hits` per group\n \n See <<search-request-inner-hits, inner hits>> for the complete list of supported options and the format of the response.\n \n+The expansion of the group is done by sending an additional query for each\n+collapsed hit returned in the response.\n+The `max_concurrent_group_searches` request parameter can be used to control\n+the maximum number of concurrent searches allowed in this phase.\n+The default is based on the number of data nodes and the default search thread pool size.\n+\n WARNING: `collapse` cannot be used in conjunction with <<search-request-scroll, scroll>>,\n <<search-request-rescore, rescore>> or <<search-request-search-after, search after>>.", "filename": 
"docs/reference/search/request/collapse.asciidoc", "status": "modified" }, { "diff": "@@ -47,7 +47,7 @@ setup:\n \n - skip:\n version: \" - 5.2.99\"\n- reason: this uses a new API that has been added in 6.0\n+ reason: this uses a new API that has been added in 5.3\n \n - do:\n search:\n@@ -83,7 +83,7 @@ setup:\n \n - skip:\n version: \" - 5.2.99\"\n- reason: this uses a new API that has been added in 6.0\n+ reason: this uses a new API that has been added in 5.3\n \n - do:\n search:\n@@ -108,7 +108,7 @@ setup:\n \n - skip:\n version: \" - 5.2.99\"\n- reason: this uses a new API that has been added in 6.0\n+ reason: this uses a new API that has been added in 5.3\n \n - do:\n search:\n@@ -147,12 +147,58 @@ setup:\n - match: { hits.hits.2.inner_hits.sub_hits.hits.hits.0._id: \"5\" }\n - match: { hits.hits.2.inner_hits.sub_hits.hits.hits.1._id: \"4\" }\n \n+\n+---\n+\"field collapsing, inner_hits and maxConcurrentGroupRequests\":\n+\n+ - skip:\n+ version: \" - 5.2.99\"\n+ reason: this uses a new API that has been added in 5.3\n+\n+ - do:\n+ search:\n+ index: test\n+ type: test\n+ body:\n+ collapse: { field: numeric_group, max_concurrent_group_searches: 10, inner_hits: { name: sub_hits, size: 2, sort: [{ sort: asc }] } }\n+ sort: [{ sort: desc }]\n+\n+ - match: { hits.total: 6 }\n+ - length: { hits.hits: 3 }\n+ - match: { hits.hits.0._index: test }\n+ - match: { hits.hits.0._type: test }\n+ - match: { hits.hits.0.fields.numeric_group: [3] }\n+ - match: { hits.hits.0.sort: [36] }\n+ - match: { hits.hits.0._id: \"6\" }\n+ - match: { hits.hits.0.inner_hits.sub_hits.hits.total: 1 }\n+ - length: { hits.hits.0.inner_hits.sub_hits.hits.hits: 1 }\n+ - match: { hits.hits.0.inner_hits.sub_hits.hits.hits.0._id: \"6\" }\n+ - match: { hits.hits.1._index: test }\n+ - match: { hits.hits.1._type: test }\n+ - match: { hits.hits.1.fields.numeric_group: [1] }\n+ - match: { hits.hits.1.sort: [24] }\n+ - match: { hits.hits.1._id: \"3\" }\n+ - match: { hits.hits.1.inner_hits.sub_hits.hits.total: 3 }\n+ - length: { hits.hits.1.inner_hits.sub_hits.hits.hits: 2 }\n+ - match: { hits.hits.1.inner_hits.sub_hits.hits.hits.0._id: \"2\" }\n+ - match: { hits.hits.1.inner_hits.sub_hits.hits.hits.1._id: \"1\" }\n+ - match: { hits.hits.2._index: test }\n+ - match: { hits.hits.2._type: test }\n+ - match: { hits.hits.2.fields.numeric_group: [25] }\n+ - match: { hits.hits.2.sort: [10] }\n+ - match: { hits.hits.2._id: \"4\" }\n+ - match: { hits.hits.2.inner_hits.sub_hits.hits.total: 2 }\n+ - length: { hits.hits.2.inner_hits.sub_hits.hits.hits: 2 }\n+ - match: { hits.hits.2.inner_hits.sub_hits.hits.hits.0._id: \"5\" }\n+ - match: { hits.hits.2.inner_hits.sub_hits.hits.hits.1._id: \"4\" }\n+\n+\n ---\n \"field collapsing and scroll\":\n \n - skip:\n version: \" - 5.2.99\"\n- reason: this uses a new API that has been added in 6.0\n+ reason: this uses a new API that has been added in 5.3\n \n - do:\n catch: /cannot use \\`collapse\\` in a scroll context/\n@@ -168,7 +214,7 @@ setup:\n \n - skip:\n version: \" - 5.2.99\"\n- reason: this uses a new API that has been added in 6.0\n+ reason: this uses a new API that has been added in 5.3\n \n - do:\n catch: /cannot use \\`collapse\\` in conjunction with \\`search_after\\`/\n@@ -185,7 +231,7 @@ setup:\n \n - skip:\n version: \" - 5.2.99\"\n- reason: this uses a new API that has been added in 6.0\n+ reason: this uses a new API that has been added in 5.3\n \n - do:\n catch: /cannot use \\`collapse\\` in conjunction with \\`rescore\\`/", "filename": 
"rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yaml", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: 2.3.4 and 2.4.3\r\n\r\n**Plugins installed**: None\r\n\r\n**JVM version**: OpenJDK 1.8.0.101-3.b13\r\n\r\n**OS version**: CentOS 6.8\r\n\r\n**Description of the problem including expected versus actual behavior**:\r\nSearch threads became completely locked up sometimes. All search threads have the same track trace. No progress is made until we restart Elasticsearch.\r\n\r\nWe seen this problem on 2.3.4 and 2.4.3. \r\n\r\n**Steps to reproduce**: Do not know how to reproduce it\r\n\r\n**Provide logs (if relevant)**:\r\n\r\nHere is stack trace from 2.3.4:\r\n\r\n```\r\n2017-02-03 10:35:44.216064500 \"elasticsearch[main][search][T#12]\" #116 daemon prio=5 os_prio=0 tid=0x00007f498c0a7800 nid=0x266f in Object.wait() [0x00007f48d18da000]\r\n2017-02-03 10:35:44.216089500 java.lang.Thread.State: RUNNABLE\r\n2017-02-03 10:35:44.216090500 \tat org.elasticsearch.search.aggregations.support.AggregationContext.bytesField(AggregationContext.java:178)\r\n2017-02-03 10:35:44.216098500 \tat org.elasticsearch.search.aggregations.support.AggregationContext.originalValuesSource(AggregationContext.java:151)\r\n2017-02-03 10:35:44.216106500 \tat org.elasticsearch.search.aggregations.support.AggregationContext.valuesSource(AggregationContext.java:85)\r\n2017-02-03 10:35:44.216108500 \tat org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory.createInternal(ValuesSourceAggregatorFactory.java:60)\r\n2017-02-03 10:35:44.216116500 \tat org.elasticsearch.search.aggregations.AggregatorFactory.create(AggregatorFactory.java:102)\r\n2017-02-03 10:35:44.216118500 \tat org.elasticsearch.search.aggregations.AggregatorFactories.createSubAggregators(AggregatorFactories.java:76)\r\n2017-02-03 10:35:44.216127500 \tat org.elasticsearch.search.aggregations.AggregatorBase.<init>(AggregatorBase.java:69)\r\n2017-02-03 10:35:44.216128500 \tat org.elasticsearch.search.aggregations.bucket.BucketsAggregator.<init>(BucketsAggregator.java:48)\r\n2017-02-03 10:35:44.216137500 \tat org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator.<init>(SingleBucketAggregator.java:38)\r\n2017-02-03 10:35:44.216139500 \tat org.elasticsearch.search.aggregations.bucket.filter.FilterAggregator.<init>(FilterAggregator.java:54)\r\n2017-02-03 10:35:44.216146500 \tat org.elasticsearch.search.aggregations.bucket.filter.FilterAggregator$Factory.createInternal(FilterAggregator.java:108)\r\n2017-02-03 10:35:44.216148500 \tat org.elasticsearch.search.aggregations.AggregatorFactory.create(AggregatorFactory.java:102)\r\n2017-02-03 10:35:44.216156500 \tat org.elasticsearch.search.aggregations.AggregatorFactories.createSubAggregators(AggregatorFactories.java:76)\r\n2017-02-03 10:35:44.216158500 \tat org.elasticsearch.search.aggregations.AggregatorBase.<init>(AggregatorBase.java:69)\r\n2017-02-03 10:35:44.216166500 \tat org.elasticsearch.search.aggregations.bucket.BucketsAggregator.<init>(BucketsAggregator.java:48)\r\n2017-02-03 10:35:44.216168500 \tat org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator.<init>(SingleBucketAggregator.java:38)\r\n2017-02-03 10:35:44.216175500 \tat org.elasticsearch.search.aggregations.bucket.nested.NestedAggregator.<init>(NestedAggregator.java:61)\r\n2017-02-03 10:35:44.216177500 \tat org.elasticsearch.search.aggregations.bucket.nested.NestedAggregator$Factory.createInternal(NestedAggregator.java:168)\r\n2017-02-03 10:35:44.216185500 \tat org.elasticsearch.search.aggregations.AggregatorFactory.create(AggregatorFactory.java:102)\r\n2017-02-03 
10:35:44.216186500 \tat org.elasticsearch.search.aggregations.AggregatorFactories.createTopLevelAggregators(AggregatorFactories.java:87)\r\n2017-02-03 10:35:44.216194500 \tat org.elasticsearch.search.aggregations.AggregationPhase.preProcess(AggregationPhase.java:85)\r\n2017-02-03 10:35:44.216202500 \tat org.elasticsearch.search.query.QueryPhase.execute(QueryPhase.java:111)\r\n2017-02-03 10:35:44.216209500 \tat org.elasticsearch.search.SearchService.loadOrExecuteQueryPhase(SearchService.java:366)\r\n2017-02-03 10:35:44.216217500 \tat org.elasticsearch.search.SearchService.executeQueryPhase(SearchService.java:378)\r\n2017-02-03 10:35:44.216225500 \tat org.elasticsearch.search.action.SearchServiceTransportAction$SearchQueryTransportHandler.messageReceived(SearchServiceTransportAction.java:368)\r\n2017-02-03 10:35:44.216234500 \tat org.elasticsearch.search.action.SearchServiceTransportAction$SearchQueryTransportHandler.messageReceived(SearchServiceTransportAction.java:365)\r\n2017-02-03 10:35:44.216236500 \tat org.elasticsearch.transport.TransportRequestHandler.messageReceived(TransportRequestHandler.java:33)\r\n2017-02-03 10:35:44.216245500 \tat org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:75)\r\n2017-02-03 10:35:44.216252500 \tat org.elasticsearch.transport.TransportService$4.doRun(TransportService.java:376)\r\n2017-02-03 10:35:44.216254500 \tat org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\r\n2017-02-03 10:35:44.216261500 \tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\r\n2017-02-03 10:35:44.216277500 \tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\r\n2017-02-03 10:35:44.216279500 \tat java.lang.Thread.run(Thread.java:745)\r\n2017-02-03 10:35:44.216280500 \r\n```\r\n", "comments": [ { "body": "@maxcom could you provide us with the aggregations that you are running please?", "created_at": "2017-02-03T09:12:02Z" }, { "body": "I can't find exact query that causes the problem. I'll add more logging to our application and wait for next reproduction.", "created_at": "2017-02-03T09:24:00Z" }, { "body": "The state of the thread is runnable, so I suspect this is not really a deadlock, but rather a memory pressure issue: so much time is spent doing garbage collection that the application does not seem to make any progress. Do you have some monitoring data of garbage collection activity / memory usage of the JVM?", "created_at": "2017-02-03T09:29:00Z" }, { "body": "I do not think that it is a memory issue. CPU is not busy at all; we see no GC activity in our logs. And no progress is made until we restart Elasticsearch (for ~30 minutes).", "created_at": "2017-02-03T09:33:20Z" }, { "body": "I dig into sources of Elasticsearch and I think that is can be some kind of class initialization deadlock. 
Similar problem is described here: https://ternarysearch.blogspot.ru/2013/07/static-initialization-deadlock.html", "created_at": "2017-02-03T09:38:00Z" }, { "body": "One more stack trace (from Elasticsearch 2.4.3):\r\n\r\n```\r\n2017-02-02 16:09:32.129209500 \"elasticsearch[main][search][T#6]\" #71 daemon prio=5 os_prio=0 tid=0x00007f5a24083000 nid=0x2e51 in Object.wait() [0x00007f59ef6d2000]\r\n2017-02-02 16:09:32.129220500 java.lang.Thread.State: RUNNABLE\r\n2017-02-02 16:09:32.130068500 at org.elasticsearch.search.aggregations.support.ValuesSource$Bytes.<clinit>(ValuesSource.java:66)\r\n2017-02-02 16:09:32.130070500 at org.elasticsearch.search.aggregations.support.AggregationContext.bytesField(AggregationContext.java:176)\r\n2017-02-02 16:09:32.130071500 at org.elasticsearch.search.aggregations.support.AggregationContext.originalValuesSource(AggregationContext.java:151)\r\n2017-02-02 16:09:32.130073500 at org.elasticsearch.search.aggregations.support.AggregationContext.valuesSource(AggregationContext.java:85)\r\n2017-02-02 16:09:32.130088500 at org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory.createInternal(ValuesSourceAggregatorFactory.java:60)\r\n2017-02-02 16:09:32.130090500 at org.elasticsearch.search.aggregations.AggregatorFactory.create(AggregatorFactory.java:102)\r\n2017-02-02 16:09:32.130091500 at org.elasticsearch.search.aggregations.AggregatorFactories.createSubAggregators(AggregatorFactories.java:76)\r\n2017-02-02 16:09:32.130096500 at org.elasticsearch.search.aggregations.AggregatorBase.<init>(AggregatorBase.java:69)\r\n2017-02-02 16:09:32.130097500 at org.elasticsearch.search.aggregations.bucket.BucketsAggregator.<init>(BucketsAggregator.java:48)\r\n2017-02-02 16:09:32.130098500 at org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator.<init>(SingleBucketAggregator.java:38)\r\n2017-02-02 16:09:32.130104500 at org.elasticsearch.search.aggregations.bucket.filter.FilterAggregator.<init>(FilterAggregator.java:54)\r\n2017-02-02 16:09:32.130106500 at org.elasticsearch.search.aggregations.bucket.filter.FilterAggregator$Factory.createInternal(FilterAggregator.java:108)\r\n2017-02-02 16:09:32.130107500 at org.elasticsearch.search.aggregations.AggregatorFactory.create(AggregatorFactory.java:102)\r\n2017-02-02 16:09:32.130111500 at org.elasticsearch.search.aggregations.AggregatorFactories.createTopLevelAggregators(AggregatorFactories.java:87)\r\n2017-02-02 16:09:32.130113500 at org.elasticsearch.search.aggregations.AggregationPhase.preProcess(AggregationPhase.java:85)\r\n2017-02-02 16:09:32.130114500 at org.elasticsearch.search.query.QueryPhase.execute(QueryPhase.java:111)\r\n2017-02-02 16:09:32.130115500 at org.elasticsearch.search.SearchService.loadOrExecuteQueryPhase(SearchService.java:372)\r\n2017-02-02 16:09:32.130128500 at org.elasticsearch.search.SearchService.executeFetchPhase(SearchService.java:480)\r\n2017-02-02 16:09:32.130130500 at org.elasticsearch.search.action.SearchServiceTransportAction$SearchQueryFetchTransportHandler.messageReceived(SearchServiceTransportAction.java:392)\r\n2017-02-02 16:09:32.130131500 at org.elasticsearch.search.action.SearchServiceTransportAction$SearchQueryFetchTransportHandler.messageReceived(SearchServiceTransportAction.java:389)\r\n2017-02-02 16:09:32.130137500 at org.elasticsearch.transport.TransportRequestHandler.messageReceived(TransportRequestHandler.java:33)\r\n2017-02-02 16:09:32.130139500 at 
org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:77)\r\n2017-02-02 16:09:32.130140500 at org.elasticsearch.transport.TransportService$4.doRun(TransportService.java:378)\r\n2017-02-02 16:09:32.130144500 at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\r\n2017-02-02 16:09:32.130145500 at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\r\n2017-02-02 16:09:32.130147500 at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\r\n2017-02-02 16:09:32.130148500 at java.lang.Thread.run(Thread.java:745)\r\n2017-02-02 16:09:32.130152500 \r\n```", "created_at": "2017-02-03T09:40:29Z" }, { "body": "So this might be a class initialization deadlock actually. Can you provide us with the entire jstack output?\r\n\r\nEDIT: I had not seen the two above messages where you already mentioned the fact it could be a class init deadlock.", "created_at": "2017-02-03T09:49:03Z" }, { "body": "I think moving the `EMPTY` constant to the `WithOrdinals` class rather than its `Bytes` parent class would fix the problem as the `Bytes` class would no longer depend on initializing its `WithOrdinals` subclass.", "created_at": "2017-02-03T10:05:09Z" }, { "body": "Sure, here is full thread dumps from 2.4.3: https://gist.github.com/maxcom/69d54d58284a7b5eea42db363bac5f6a", "created_at": "2017-02-03T10:27:23Z" }, { "body": "Closing as this bug does not exist in 5.x and will be fixed in the upcoming 2.4 release (for which we have no ETA at the moment). Thanks @maxcom for the detailed bug report and being so reactive helping us understand what was happening.", "created_at": "2017-02-06T12:51:55Z" } ], "number": 22952, "title": "Deadlock on aggregation in 2.x" }
{ "body": "This issue had already been addressed on 5.x/master.\r\n\r\nCloses #22952", "number": 22994, "review_comments": [], "title": "Avoid classloading deadlock in ValuesSource." }
{ "commits": [ { "message": "Avoid classloading deadlock in ValuesSource.\n\nCloses #22952" } ], "files": [ { "diff": "@@ -77,7 +77,7 @@ public <VS extends ValuesSource> VS valuesSource(ValuesSourceConfig<VS> config,\n } else if (ValuesSource.class.isAssignableFrom(config.valueSourceType)\n || ValuesSource.Bytes.class.isAssignableFrom(config.valueSourceType)\n || ValuesSource.Bytes.WithOrdinals.class.isAssignableFrom(config.valueSourceType)) {\n- vs = (VS) ValuesSource.Bytes.EMPTY;\n+ vs = (VS) ValuesSource.Bytes.WithOrdinals.EMPTY;\n } else {\n throw new SearchParseException(searchContext, \"Can't deal with unmapped ValuesSource type \" + config.valueSourceType, null);\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java", "status": "modified" }, { "diff": "@@ -63,25 +63,6 @@ public boolean needsScores() {\n \n public static abstract class Bytes extends ValuesSource {\n \n- public static final WithOrdinals EMPTY = new WithOrdinals() {\n-\n- @Override\n- public RandomAccessOrds ordinalsValues(LeafReaderContext context) {\n- return DocValues.emptySortedSet();\n- }\n-\n- @Override\n- public RandomAccessOrds globalOrdinalsValues(LeafReaderContext context) {\n- return DocValues.emptySortedSet();\n- }\n-\n- @Override\n- public SortedBinaryDocValues bytesValues(LeafReaderContext context) throws IOException {\n- return org.elasticsearch.index.fielddata.FieldData.emptySortedBinary(context.reader().maxDoc());\n- }\n-\n- };\n-\n @Override\n public Bits docsWithValue(LeafReaderContext context) throws IOException {\n final SortedBinaryDocValues bytes = bytesValues(context);\n@@ -94,6 +75,25 @@ public Bits docsWithValue(LeafReaderContext context) throws IOException {\n \n public static abstract class WithOrdinals extends Bytes {\n \n+ public static final WithOrdinals EMPTY = new WithOrdinals() {\n+\n+ @Override\n+ public RandomAccessOrds ordinalsValues(LeafReaderContext context) {\n+ return DocValues.emptySortedSet();\n+ }\n+\n+ @Override\n+ public RandomAccessOrds globalOrdinalsValues(LeafReaderContext context) {\n+ return DocValues.emptySortedSet();\n+ }\n+\n+ @Override\n+ public SortedBinaryDocValues bytesValues(LeafReaderContext context) throws IOException {\n+ return org.elasticsearch.index.fielddata.FieldData.emptySortedBinary(context.reader().maxDoc());\n+ }\n+\n+ };\n+\n @Override\n public Bits docsWithValue(LeafReaderContext context) {\n final RandomAccessOrds ordinals = ordinalsValues(context);", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java", "status": "modified" } ] }
{ "body": "This blows up on the `Debug.explain`.\r\n\r\n```\r\nDELETE /test\r\n\r\nPUT /test\r\n{\r\n \"settings\": {\r\n \"number_of_replicas\": 0,\r\n \"number_of_shards\": 2\r\n },\r\n \"mappings\": {\r\n \"test\": {\r\n \"properties\": {\r\n \"k\": {\r\n \"type\": \"keyword\"\r\n },\r\n \"dc\": {\r\n \"type\": \"keyword\"\r\n },\r\n \"d\": {\r\n \"type\": \"date\",\r\n \"format\": \"yyyy-MM-dd\"\r\n },\r\n \"v\": {\r\n \"type\": \"long\"\r\n }\r\n }\r\n }\r\n }\r\n}\r\n\r\nPOST /test/test\r\n{\"k\": \"a\", \"dc\": \"dc1\", \"d\": \"2017-01-01\", \"v\": 1}\r\nPOST /test/test\r\n{\"k\": \"a\", \"dc\": \"dc1\", \"d\": \"2017-01-02\", \"v\": 2}\r\nPOST /test/test\r\n{\"k\": \"b\", \"dc\": \"dc1\", \"d\": \"2017-01-01\", \"v\": 3}\r\nPOST /test/test\r\n{\"k\": \"c\", \"dc\": \"dc2\", \"d\": \"2017-01-01\", \"v\": 4}\r\nPOST /test/test\r\n{\"k\": \"c\", \"dc\": \"dc2\", \"d\": \"2017-01-02\", \"v\": 5}\r\nPOST /test/_refresh\r\n\r\nPOST /test/_search?size=0\r\n{\r\n \"aggs\": {\r\n \"dc\": {\r\n \"terms\": {\r\n \"field\": \"dc\"\r\n },\r\n \"aggs\": {\r\n \"k\": {\r\n \"terms\": {\"field\": \"k\"},\r\n \"aggs\": {\r\n \"k\": {\r\n \"top_hits\": {\r\n \"size\": 1,\r\n \"sort\": [\r\n {\r\n \"d\": {\r\n \"order\": \"desc\"\r\n }\r\n }\r\n ]\r\n }\r\n },\r\n \"last_k\": {\r\n \"scripted_metric\": {\r\n \"init_script\": \"params._agg.v = 0; params._agg.t = 0\",\r\n \"map_script\": \"if (doc.d.value > params._agg.t) {params._agg.t = doc.d.value; params._agg.v = doc.v}\",\r\n \"reduce_script\": \"params._aggs.stream().reduce((lhs, rhs) -> lhs.t > rhs.t ? lhs.v : rhs.v).map(agg -> Debug.explain(agg)).orElse(0)\", \r\n \"combine_script\": \"params._agg\"\r\n }\r\n }\r\n }\r\n }\r\n }\r\n }\r\n }\r\n}\r\n\r\n```", "comments": [ { "body": "I mean, it should blow up, but it blows up with a verify error:\r\n```\r\n \"reason\": \"Operand stack underflow\\nException Details:\\n Location:\\n org/elasticsearch/painless/Executable$Script.lambda$1(Ljava/lang/Object;)Ljava/lang/Object; @4: areturn\\n Reason:\\n Attempt to pop empty stack.\\n Current Frame:\\n bci: @4\\n flags: { }\\n locals: { 'java/lang/Object' }\\n stack: { }\\n Bytecode:\\n 0x0000000: 2ab8 0038 b0 \\n\"\r\n```", "created_at": "2017-02-01T16:56:30Z" }, { "body": "I think this may be the same issue that @polyfractal had at some point when he was working with Painless for a different project. I'll have to check back through our conversations to see if I can find when he mentioned it.", "created_at": "2017-02-01T21:10:54Z" }, { "body": "I was actually getting something slightly different. I can't seem to find the full stack trace, but from our chat it was:\r\n\r\n```\r\nScriptException[runtime error\r\n]; nested: PainlessExplainError;\r\n```\r\n\r\nI'll see if I can recreate it for a proper bug-report, but I think it was something unrelated", "created_at": "2017-02-01T22:07:13Z" }, { "body": "> PainlessExplainError\r\n\r\nThat error *shouldn't* escape the node. It is supposed to be an implementation detail of `Debug.explain`. Uncatchable, invisible, and final. I was using it a bunch today and, other than this issue it was working pretty well.", "created_at": "2017-02-02T02:35:00Z" }, { "body": "I've tracked this down a bit further. 
The following painless script is broken:\r\n```\r\ndef test(StringBuilder b, int i) {\r\n return b.setLength(i)\r\n}\r\ntest(new StringBuilder(), 1)\r\n```\r\n\r\nI've almost found the right place to fix.", "created_at": "2017-02-03T20:03:38Z" } ], "number": 22908, "title": "Painless code generation error when returning nothing from a Function" }
{ "body": "Painless can cast anything into the magic type `def` but it\r\nreally shouldn't try to cast **nothing** into `def`. That causes\r\nthe byte code generation library to freak out a little.\r\n\r\nCloses #22908\r\n", "number": 22969, "review_comments": [ { "body": "I'm paranoid about order of operations here.", "created_at": "2017-02-03T20:22:28Z" }, { "body": "This one line is the actual fix.", "created_at": "2017-02-03T20:22:33Z" }, { "body": "I added this because I was frustrated that I couldn't see what a `Variable` was while debugging.", "created_at": "2017-02-03T20:23:02Z" }, { "body": "I was tired of the garbage error messages these were making", "created_at": "2017-02-03T20:23:26Z" }, { "body": "None of these methods actually had the problem.", "created_at": "2017-02-03T20:23:51Z" }, { "body": "These were fine.", "created_at": "2017-02-03T20:24:32Z" }, { "body": "This one was broken.", "created_at": "2017-02-03T20:24:37Z" }, { "body": "It had a VerificationError.", "created_at": "2017-02-03T20:24:47Z" }, { "body": "This one was broken - it had a VerificationError.", "created_at": "2017-02-03T20:25:06Z" }, { "body": "This one worked.", "created_at": "2017-02-03T20:25:10Z" }, { "body": "None of this is new behavior.", "created_at": "2017-02-03T20:25:27Z" }, { "body": "Is there a specific case you're worried about?", "created_at": "2017-02-03T21:08:44Z" }, { "body": "No. I just can never remember if `&&` comes before `||` so I always use parentheses to make them clear to me.", "created_at": "2017-02-03T21:24:08Z" }, { "body": "Ahh. I completely missed the parenthesis had been added.", "created_at": "2017-02-03T21:31:17Z" } ], "title": "Don't allow casting from void to def in painless" }
{ "commits": [ { "message": "Painless: Don't allow casting from void to def\n\nPainless can cast anything into the magic type `def` but it\nreally shouldn't try to cast **nothing** into `def`. That causes\nthe byte code generation library to freak out a little.\n\nCloses #22908" } ], "files": [ { "diff": "@@ -722,9 +722,10 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b\n break;\n }\n \n- if (actual.sort == Sort.DEF || expected.sort == Sort.DEF ||\n- expected.clazz.isAssignableFrom(actual.clazz) ||\n- explicit && actual.clazz.isAssignableFrom(expected.clazz)) {\n+ if ( actual.sort == Sort.DEF\n+ || (actual.sort != Sort.VOID && expected.sort == Sort.DEF)\n+ || expected.clazz.isAssignableFrom(actual.clazz)\n+ || (explicit && actual.clazz.isAssignableFrom(expected.clazz))) {\n return new Cast(actual, expected, explicit);\n } else {\n throw location.createError(new ClassCastException(\"Cannot cast from [\" + actual.name + \"] to [\" + expected.name + \"].\"));", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java", "status": "modified" }, { "diff": "@@ -318,6 +318,19 @@ public Variable(Location location, String name, Type type, int slot, boolean rea\n public int getSlot() {\n return slot;\n }\n+\n+ @Override\n+ public String toString() {\n+ StringBuilder b = new StringBuilder();\n+ b.append(\"Variable[type=\").append(type);\n+ b.append(\",name=\").append(name);\n+ b.append(\",slot=\").append(slot);\n+ if (readonly) {\n+ b.append(\",readonly\");\n+ }\n+ b.append(']');\n+ return b.toString();\n+ }\n }\n \n public static final class Parameter {", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/Locals.java", "status": "modified" }, { "diff": "@@ -22,7 +22,10 @@\n import org.joda.time.DateTime;\n import org.joda.time.DateTimeZone;\n \n+import java.lang.invoke.LambdaConversionException;\n+\n import static java.util.Collections.singletonMap;\n+import static org.hamcrest.Matchers.containsString;\n import static org.hamcrest.Matchers.endsWith;\n import static org.hamcrest.Matchers.startsWith;\n \n@@ -208,7 +211,7 @@ public void testNotFunctionalInterface() {\n IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> {\n exec(\"List l = new ArrayList(); l.add(2); l.add(1); l.add(Integer::bogus); return l.get(0);\");\n });\n- assertTrue(expected.getMessage().contains(\"Cannot convert function reference\"));\n+ assertThat(expected.getMessage(), containsString(\"Cannot convert function reference\"));\n }\n \n public void testIncompatible() {\n@@ -221,7 +224,7 @@ public void testWrongArity() {\n IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> {\n exec(\"Optional.empty().orElseGet(String::startsWith);\");\n });\n- assertTrue(expected.getMessage().contains(\"Unknown reference\"));\n+ assertThat(expected.getMessage(), containsString(\"Unknown reference\"));\n }\n \n public void testWrongArityNotEnough() {\n@@ -235,13 +238,38 @@ public void testWrongArityDef() {\n IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> {\n exec(\"def y = Optional.empty(); return y.orElseGet(String::startsWith);\");\n });\n- assertTrue(expected.getMessage().contains(\"Unknown reference\"));\n+ assertThat(expected.getMessage(), containsString(\"Unknown reference\"));\n }\n \n public void testWrongArityNotEnoughDef() {\n IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () 
-> {\n exec(\"def l = new ArrayList(); l.add(2); l.add(1); l.sort(String::isEmpty);\");\n });\n- assertTrue(expected.getMessage().contains(\"Unknown reference\"));\n+ assertThat(expected.getMessage(), containsString(\"Unknown reference\"));\n+ }\n+\n+ public void testReturnVoid() {\n+ Throwable expected = expectScriptThrows(BootstrapMethodError.class, () -> {\n+ exec(\"StringBuilder b = new StringBuilder(); List l = [1, 2]; l.stream().mapToLong(b::setLength);\");\n+ });\n+ assertThat(expected.getCause().getMessage(),\n+ containsString(\"Type mismatch for lambda expected return: void is not convertible to long\"));\n+ }\n+\n+ public void testReturnVoidDef() {\n+ Exception expected = expectScriptThrows(LambdaConversionException.class, () -> {\n+ exec(\"StringBuilder b = new StringBuilder(); def l = [1, 2]; l.stream().mapToLong(b::setLength);\");\n+ });\n+ assertThat(expected.getMessage(), containsString(\"Type mismatch for lambda expected return: void is not convertible to long\"));\n+\n+ expected = expectScriptThrows(LambdaConversionException.class, () -> {\n+ exec(\"def b = new StringBuilder(); def l = [1, 2]; l.stream().mapToLong(b::setLength);\");\n+ });\n+ assertThat(expected.getMessage(), containsString(\"Type mismatch for lambda expected return: void is not convertible to long\"));\n+\n+ expected = expectScriptThrows(LambdaConversionException.class, () -> {\n+ exec(\"def b = new StringBuilder(); List l = [1, 2]; l.stream().mapToLong(b::setLength);\");\n+ });\n+ assertThat(expected.getMessage(), containsString(\"Type mismatch for lambda expected return: void is not convertible to long\"));\n }\n }", "filename": "modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java", "status": "modified" }, { "diff": "@@ -19,6 +19,8 @@\n \n package org.elasticsearch.painless;\n \n+import static org.hamcrest.Matchers.containsString;\n+\n public class FunctionTests extends ScriptTestCase {\n public void testBasic() {\n assertEquals(5, exec(\"int get() {5;} get()\"));\n@@ -49,21 +51,37 @@ public void testEmpty() {\n Exception expected = expectScriptThrows(IllegalArgumentException.class, () -> {\n exec(\"void test(int x) {} test()\");\n });\n- assertTrue(expected.getMessage().contains(\"Cannot generate an empty function\"));\n+ assertThat(expected.getMessage(), containsString(\"Cannot generate an empty function\"));\n }\n \n public void testDuplicates() {\n Exception expected = expectScriptThrows(IllegalArgumentException.class, () -> {\n exec(\"void test(int x) {x = 2;} void test(def y) {y = 3;} test()\");\n });\n- assertTrue(expected.getMessage().contains(\"Duplicate functions\"));\n+ assertThat(expected.getMessage(), containsString(\"Duplicate functions\"));\n }\n \n public void testInfiniteLoop() {\n Error expected = expectScriptThrows(PainlessError.class, () -> {\n exec(\"void test() {boolean x = true; while (x) {}} test()\");\n });\n- assertTrue(expected.getMessage().contains(\n- \"The maximum number of statements that can be executed in a loop has been reached.\"));\n+ assertThat(expected.getMessage(),\n+ containsString(\"The maximum number of statements that can be executed in a loop has been reached.\"));\n+ }\n+\n+ public void testReturnVoid() {\n+ assertEquals(null, exec(\"void test(StringBuilder b, int i) {b.setLength(i)} test(new StringBuilder(), 1)\"));\n+ Exception expected = expectScriptThrows(IllegalArgumentException.class, () -> {\n+ exec(\"int test(StringBuilder b, int i) {b.setLength(i)} test(new StringBuilder(), 1)\");\n+ });\n+ assertEquals(\"Not all 
paths provide a return value for method [test].\", expected.getMessage());\n+ expected = expectScriptThrows(ClassCastException.class, () -> {\n+ exec(\"int test(StringBuilder b, int i) {return b.setLength(i)} test(new StringBuilder(), 1)\");\n+ });\n+ assertEquals(\"Cannot cast from [void] to [int].\", expected.getMessage());\n+ expected = expectScriptThrows(ClassCastException.class, () -> {\n+ exec(\"def test(StringBuilder b, int i) {return b.setLength(i)} test(new StringBuilder(), 1)\");\n+ });\n+ assertEquals(\"Cannot cast from [void] to [def].\", expected.getMessage());\n }\n }", "filename": "modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionTests.java", "status": "modified" }, { "diff": "@@ -19,9 +19,12 @@\n \n package org.elasticsearch.painless;\n \n+import java.util.Arrays;\n import java.util.HashMap;\n import java.util.Map;\n \n+import static org.hamcrest.Matchers.containsString;\n+\n public class LambdaTests extends ScriptTestCase {\n \n public void testNoArgLambda() {\n@@ -231,4 +234,25 @@ public void testReservedCapture() {\n assertEquals(false, exec(compare + \"compare(() -> { if (params['number'] == 1) { return params['number'] }\" +\n \"else { return params['key'] } }, 2)\", params, true));\n }\n+\n+ public void testReturnVoid() {\n+ Throwable expected = expectScriptThrows(ClassCastException.class, () -> {\n+ exec(\"StringBuilder b = new StringBuilder(); List l = [1, 2]; l.stream().mapToLong(i -> b.setLength(i))\");\n+ });\n+ assertThat(expected.getMessage(), containsString(\"Cannot cast from [void] to [long].\"));\n+ }\n+\n+ public void testReturnVoidDef() {\n+ // If we can catch the error at compile time we do\n+ Exception expected = expectScriptThrows(ClassCastException.class, () -> {\n+ exec(\"StringBuilder b = new StringBuilder(); def l = [1, 2]; l.stream().mapToLong(i -> b.setLength(i))\");\n+ });\n+ assertThat(expected.getMessage(), containsString(\"Cannot cast from [void] to [def].\"));\n+\n+ // Otherwise we convert the void into a null\n+ assertEquals(Arrays.asList(null, null),\n+ exec(\"def b = new StringBuilder(); def l = [1, 2]; l.stream().map(i -> b.setLength(i)).collect(Collectors.toList())\"));\n+ assertEquals(Arrays.asList(null, null),\n+ exec(\"def b = new StringBuilder(); List l = [1, 2]; l.stream().map(i -> b.setLength(i)).collect(Collectors.toList())\"));\n+ }\n }", "filename": "modules/lang-painless/src/test/java/org/elasticsearch/painless/LambdaTests.java", "status": "modified" } ] }
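For reference, the heart of the change above is the extra `actual.sort != Sort.VOID` condition in `AnalyzerCaster.getLegalCast`: `def` keeps accepting every real value, but a `void` expression leaves nothing on the operand stack, which is why the generated `areturn` tripped the bytecode verifier ("Operand stack underflow") in the original report. Below is a hypothetical, self-contained restatement of that predicate; the enum and the two boolean parameters merely stand in for the `Sort` and class-assignability machinery, which is not reproduced here.

```java
enum Sort { VOID, DEF, OTHER }

final class LegalCastSketch {
    // 'assignable' stands for expected.clazz.isAssignableFrom(actual.clazz);
    // 'explicitDowncast' stands for (explicit && actual.clazz.isAssignableFrom(expected.clazz)).
    static boolean isLegal(Sort actual, Sort expected, boolean assignable, boolean explicitDowncast) {
        return actual == Sort.DEF
            || (actual != Sort.VOID && expected == Sort.DEF)
            || assignable
            || explicitDowncast;
    }

    public static void main(String[] args) {
        // void -> def is now rejected at compile time: "Cannot cast from [void] to [def]."
        System.out.println(isLegal(Sort.VOID, Sort.DEF, false, false)); // false
        // any real value -> def is still implicitly allowed
        System.out.println(isLegal(Sort.OTHER, Sort.DEF, false, false)); // true
    }
}
```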
{ "body": "\r\n**Environment**:\r\nReindexing from ES 1.7.5 -> ES 5.2.0\r\n\r\n**Description**:\r\n\r\nThis error message is output repeatedly during reindex. The reindex can complete successfully, but perhaps not efficiently.\r\n\r\n```\r\n2017-02-02T13:02:16,867][WARN ][o.e.i.r.TransportReindexAction] [CBuuMNt] Failed to clear scroll [c2NhbjswOzE7dG90YWxfaGl0czoyOw==]\r\norg.elasticsearch.client.ResponseException: DELETE http://127.0.0.1:9201/_search/scroll: HTTP/1.1 404 Not Found\r\n{}\r\n\tat org.elasticsearch.client.RestClient$1.completed(RestClient.java:311) [rest-5.2.0.jar:5.2.0]\r\n\tat org.elasticsearch.client.RestClient$1.completed(RestClient.java:300) [rest-5.2.0.jar:5.2.0]\r\n\tat org.apache.http.concurrent.BasicFuture.completed(BasicFuture.java:119) [httpcore-4.4.5.jar:4.4.5]\r\n\tat org.apache.http.impl.nio.client.DefaultClientExchangeHandlerImpl.responseCompleted(DefaultClientExchangeHandlerImpl.java:177) [httpasyncclient-4.1.2.jar:4.1.2]\r\n\tat org.apache.http.nio.protocol.HttpAsyncRequestExecutor.processResponse(HttpAsyncRequestExecutor.java:436) [httpcore-nio-4.4.5.jar:4.4.5]\r\n\tat org.apache.http.nio.protocol.HttpAsyncRequestExecutor.inputReady(HttpAsyncRequestExecutor.java:326) [httpcore-nio-4.4.5.jar:4.4.5]\r\n\tat org.apache.http.impl.nio.DefaultNHttpClientConnection.consumeInput(DefaultNHttpClientConnection.java:265) [httpcore-nio-4.4.5.jar:4.4.5]\r\n\tat org.apache.http.impl.nio.client.InternalIODispatch.onInputReady(InternalIODispatch.java:81) [httpasyncclient-4.1.2.jar:4.1.2]\r\n\tat org.apache.http.impl.nio.client.InternalIODispatch.onInputReady(InternalIODispatch.java:39) [httpasyncclient-4.1.2.jar:4.1.2]\r\n\tat org.apache.http.impl.nio.reactor.AbstractIODispatch.inputReady(AbstractIODispatch.java:114) [httpcore-nio-4.4.5.jar:4.4.5]\r\n\tat org.apache.http.impl.nio.reactor.BaseIOReactor.readable(BaseIOReactor.java:162) [httpcore-nio-4.4.5.jar:4.4.5]\r\n\tat org.apache.http.impl.nio.reactor.AbstractIOReactor.processEvent(AbstractIOReactor.java:337) [httpcore-nio-4.4.5.jar:4.4.5]\r\n\tat org.apache.http.impl.nio.reactor.AbstractIOReactor.processEvents(AbstractIOReactor.java:315) [httpcore-nio-4.4.5.jar:4.4.5]\r\n\tat org.apache.http.impl.nio.reactor.AbstractIOReactor.execute(AbstractIOReactor.java:276) [httpcore-nio-4.4.5.jar:4.4.5]\r\n\tat org.apache.http.impl.nio.reactor.BaseIOReactor.execute(BaseIOReactor.java:104) [httpcore-nio-4.4.5.jar:4.4.5]\r\n\tat org.apache.http.impl.nio.reactor.AbstractMultiworkerIOReactor$Worker.run(AbstractMultiworkerIOReactor.java:588) [httpcore-nio-4.4.5.jar:4.4.5]\r\n\tat java.lang.Thread.run(Thread.java:745) [?:1.8.0_60]\r\n```\r\n\r\n**Notable**:\r\n- Problem does not happen with 1.x -> 5.1.x\r\n- Problem does not happen with 2.x -> 5.2.0", "comments": [ { "body": "I saw it today. This comes from a fix we did in 5.2 where we were more careful to actually clear the scroll, I think. I'll investigate, but I believe it can safely be ignored.", "created_at": "2017-02-02T18:13:13Z" } ], "number": 22937, "title": "\"Failed to clear scroll\" when reindexing from 1.x to 5.2.0" }
{ "body": "Versions of Elasticsearch prior to 2.0 would return a scroll id\r\neven with the last scroll response. They'd then automatically\r\nclear the scroll because it is empty. When terminating reindex\r\nwill attempt to clear the last scroll it received, regardless of\r\nthe remote version. This quiets the warning when the scroll cannot\r\nbe cleared for versions before 2.0.\r\n\r\nCloses #22937\r\n", "number": 22942, "review_comments": [], "title": "Reindex: do not log when can't clear old scroll" }
{ "commits": [ { "message": "Reindex: do not log when can't clear old scroll\n\nVersions of Elasticsearch prior to 2.0 would return a scroll id\neven with the last scroll response. They'd then automatically\nclear the scroll because it is empty. When terminating reindex\nwill attempt to clear the last scroll it received, regardless of\nthe remote version. This quiets the warning when the scroll cannot\nbe cleared for versions before 2.0.\n\nCloses #22937" } ], "files": [ { "diff": "@@ -118,10 +118,23 @@ public void onSuccess(org.elasticsearch.client.Response response) {\n }\n \n @Override\n- public void onFailure(Exception t) {\n- logger.warn((Supplier<?>) () -> new ParameterizedMessage(\"Failed to clear scroll [{}]\", scrollId), t);\n+ public void onFailure(Exception e) {\n+ logFailure(e);\n onCompletion.run();\n }\n+\n+ private void logFailure(Exception e) {\n+ if (e instanceof ResponseException) {\n+ ResponseException re = (ResponseException) e;\n+ if (remoteVersion.before(Version.V_2_0_0) && re.getResponse().getStatusLine().getStatusCode() == 404) {\n+ logger.debug((Supplier<?>) () -> new ParameterizedMessage(\n+ \"Failed to clear scroll [{}] from pre-2.0 Elasticsearch. This is normal if the request terminated \"\n+ + \"normally as the scroll has already been cleared automatically.\", scrollId), e);\n+ return;\n+ }\n+ }\n+ logger.warn((Supplier<?>) () -> new ParameterizedMessage(\"Failed to clear scroll [{}]\", scrollId), e);\n+ }\n });\n }\n \n@@ -132,7 +145,7 @@ protected void cleanup() {\n threadPool.generic().submit(() -> {\n try {\n client.close();\n- logger.info(\"Shut down remote connection\");\n+ logger.debug(\"Shut down remote connection\");\n } catch (IOException e) {\n logger.error(\"Failed to shutdown the remote connection\", e);\n }", "filename": "modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: 5.2.0\r\n\r\n**Plugins installed**: [x-pack]\r\n\r\n**JVM version**: java version \"1.8.0_73\"\r\n\r\n**OS version**: OSX\r\n\r\n**Description of the problem including expected versus actual behavior**:\r\n`elasticsearch-plugin remove` does not fail gracefully, because it does not check for the presence of a `plugin-id`.\r\n\r\n```\r\nelasticsearch-5.2.0/bin/elasticsearch-plugin remove\r\n-> Removing ...\r\nException in thread \"main\" java.lang.NullPointerException\r\n\tat sun.nio.fs.UnixPath.normalizeAndCheck(UnixPath.java:77)\r\n\tat sun.nio.fs.UnixPath.<init>(UnixPath.java:71)\r\n\tat sun.nio.fs.UnixFileSystem.getPath(UnixFileSystem.java:281)\r\n\tat sun.nio.fs.AbstractPath.resolve(AbstractPath.java:53)\r\n\tat org.elasticsearch.plugins.RemovePluginCommand.execute(RemovePluginCommand.java:68)\r\n\tat org.elasticsearch.plugins.RemovePluginCommand.execute(RemovePluginCommand.java:59)\r\n\tat org.elasticsearch.cli.SettingCommand.execute(SettingCommand.java:54)\r\n\tat org.elasticsearch.cli.Command.mainWithoutErrorHandling(Command.java:122)\r\n\tat org.elasticsearch.cli.MultiCommand.execute(MultiCommand.java:69)\r\n\tat org.elasticsearch.cli.Command.mainWithoutErrorHandling(Command.java:122)\r\n\tat org.elasticsearch.cli.Command.main(Command.java:88)\r\n\tat org.elasticsearch.plugins.PluginCli.main(PluginCli.java:47)\r\n```\r\n\r\nMeanwhile, these use cases all fail with a good error message:\r\n`elasticsearch-5.2.0/bin/elasticsearch-plugin install`\r\n`elasticsearch-5.2.0/bin/elasticsearch-plugin remove does-not-exist`\r\n`elasticsearch-5.2.0/bin/elasticsearch-plugin invalid-action`", "comments": [], "number": 22922, "title": "elasticsearch-plugin remove without plugin name fails ungracefully" }
{ "body": "closes #22922", "number": 22935, "review_comments": [], "title": "Plugins: Make remove command resilient to missing plugin name" }
{ "commits": [ { "message": "Plugins: Make remove command resilient to missing plugin name\n\ncloses #22922" } ], "files": [ { "diff": "@@ -57,6 +57,9 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th\n \n // pkg private for testing\n void execute(Terminal terminal, String pluginName, Environment env) throws Exception {\n+ if (pluginName == null) {\n+ throw new UserException(ExitCodes.USAGE, \"A plugin name must be provided\");\n+ }\n terminal.println(\"-> Removing \" + Strings.coalesceToEmpty(pluginName) + \"...\");\n \n final Path pluginDir = env.pluginsFile().resolve(pluginName);", "filename": "core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java", "status": "modified" }, { "diff": "@@ -78,6 +78,12 @@ static void assertRemoveCleaned(Environment env) throws IOException {\n }\n \n public void testMissing() throws Exception {\n+ UserException e = expectThrows(UserException.class, () -> removePlugin(null, home));\n+ assertTrue(e.getMessage(), e.getMessage().contains(\"A plugin name must be provided\"));\n+ assertRemoveCleaned(env);\n+ }\n+\n+ public void testUnknown() throws Exception {\n UserException e = expectThrows(UserException.class, () -> removePlugin(\"dne\", home));\n assertTrue(e.getMessage(), e.getMessage().contains(\"plugin dne not found\"));\n assertRemoveCleaned(env);", "filename": "qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java", "status": "modified" } ] }
{ "body": "\r\n - Occurs when reindexing from ES 1.7.5 -> 5.2\r\n - Does not occur with ES 1.7.5 -> 5.1.1\r\n\r\nSteps to reproduce:\r\n\r\n1. Index data into 1.7.5:\r\n```\r\nPOST /i/d?pretty\r\n{\r\n \"test\": \"doc\"\r\n}\r\n```\r\n\r\n2. Spin up 5.2.0 and run reindex:\r\n\r\n```\r\nreindex.remote.whitelist: [\"127.0.0.1:9201\"]\r\n```\r\n\r\n```\r\nPOST /_reindex?wait_for_completion&pretty\r\n{\r\n \"conflicts\": \"proceed\",\r\n \"source\": {\r\n \"remote\": {\r\n \"host\": \"http://127.0.0.1:9201\"\r\n },\r\n \"index\": \"i\",\r\n \"type\": [\r\n \"d\"\r\n ]\r\n },\r\n \"dest\": {\r\n \"index\": \"i2\"\r\n }\r\n}\r\n```\r\n\r\nResults in:\r\n\r\n```\r\n{\r\n \"error\": {\r\n \"root_cause\": [\r\n {\r\n \"type\": \"illegal_argument_exception\",\r\n \"reason\": \"[i][d][AVn1_zFoaRGPrSCAf9Gw] didn't store _source\"\r\n }\r\n ],\r\n \"type\": \"illegal_argument_exception\",\r\n \"reason\": \"[i][d][AVn1_zFoaRGPrSCAf9Gw] didn't store _source\"\r\n },\r\n \"status\": 400\r\n}\r\n```\r\n\r\n\r\n\r\n", "comments": [ { "body": "It's a side effect of a bug fix #22507, reindex does not explicitly request `_source` in the remote index (automatically add `_source:true` in the source like in 5.1). Though for cluster older than 2.0.0 we automatically add metadata stored fields (`ttl`, `parent`, ...) to retrieve. The combo makes a request that filters _source from the response.\r\nThe workaround is to add `\"_source\":true` in `source` ;) and it works fine but it should not be a requirement. Although we'll have to document this behavior for the 5.2.0 timeframe.\r\n@nik9000 any thoughts ?\r\n\r\n", "created_at": "2017-01-31T20:49:53Z" }, { "body": "> @nik9000 any thoughts ?\r\n\r\nI'm happy to put together a fix sometime in the next few days. I'll have to dig up my old versions of elasticsearch and have a look.", "created_at": "2017-01-31T20:52:24Z" }, { "body": "I try to reindex from 1.7.4 to 6.1, and I encounter this error. The documents are stored without _source on purpose. I just wonder if the reindex should work, or if it's impossible to do such a reindex.", "created_at": "2017-12-15T10:10:31Z" }, { "body": "No if you don’t have source you can’t reindex (which reads source).\r\n", "created_at": "2017-12-15T10:21:37Z" } ], "number": 22893, "title": "\"didn't store _source\" when reindexing into ES 5.2 " }
{ "body": "In 5.2 we stopped sending the source parameter if the user didn't\r\nspecify it. This was a mistake as versions before 2.0 look like\r\nthey don't always include the `_source`. This is because reindex\r\nrequests some metadata fields. Anyway, now we say `\"_source\": true`\r\nif there isn't a `_source` configured in the reindex request.\r\n\r\nCloses #22893\r\n", "number": 22931, "review_comments": [], "title": "Fix reindex-from-remote from <2.0" }
{ "commits": [ { "message": "Reindex: fix reindex-from-remote from <2.0\n\nIn 5.2 we stopped sending the source parameter if the user didn't\nspecify it. This was a mistake as versions before 2.0 look like\nthey don't always include the `_source`. This is because reindex\nrequests some metadata fields. Anyway, now we say `\"_source\": true`\nif there isn't a `_source` configured in the reindex request.\n\nCloses #22893" } ], "files": [ { "diff": "@@ -124,6 +124,8 @@ static HttpEntity initialSearchEntity(SearchRequest searchRequest, BytesReferenc\n \n if (searchRequest.source().fetchSource() != null) {\n entity.field(\"_source\", searchRequest.source().fetchSource());\n+ } else {\n+ entity.field(\"_source\", true);\n }\n \n entity.endObject();", "filename": "modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java", "status": "modified" }, { "diff": "@@ -160,7 +160,7 @@ public void testInitialSearchEntity() throws IOException {\n String query = \"{\\\"match_all\\\":{}}\";\n HttpEntity entity = initialSearchEntity(searchRequest, new BytesArray(query));\n assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue());\n- assertEquals(\"{\\\"query\\\":\" + query + \"}\",\n+ assertEquals(\"{\\\"query\\\":\" + query + \",\\\"_source\\\":true}\",\n Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8)));\n \n // Source filtering is included if set up", "filename": "modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: 5.2.0\r\n\r\n**Plugins installed**: [x-pack]\r\n\r\n**JVM version**: java version \"1.8.0_73\"\r\n\r\n**OS version**: OSX\r\n\r\n**Description of the problem including expected versus actual behavior**:\r\n`elasticsearch-plugin remove` does not fail gracefully, because it does not check for the presence of a `plugin-id`.\r\n\r\n```\r\nelasticsearch-5.2.0/bin/elasticsearch-plugin remove\r\n-> Removing ...\r\nException in thread \"main\" java.lang.NullPointerException\r\n\tat sun.nio.fs.UnixPath.normalizeAndCheck(UnixPath.java:77)\r\n\tat sun.nio.fs.UnixPath.<init>(UnixPath.java:71)\r\n\tat sun.nio.fs.UnixFileSystem.getPath(UnixFileSystem.java:281)\r\n\tat sun.nio.fs.AbstractPath.resolve(AbstractPath.java:53)\r\n\tat org.elasticsearch.plugins.RemovePluginCommand.execute(RemovePluginCommand.java:68)\r\n\tat org.elasticsearch.plugins.RemovePluginCommand.execute(RemovePluginCommand.java:59)\r\n\tat org.elasticsearch.cli.SettingCommand.execute(SettingCommand.java:54)\r\n\tat org.elasticsearch.cli.Command.mainWithoutErrorHandling(Command.java:122)\r\n\tat org.elasticsearch.cli.MultiCommand.execute(MultiCommand.java:69)\r\n\tat org.elasticsearch.cli.Command.mainWithoutErrorHandling(Command.java:122)\r\n\tat org.elasticsearch.cli.Command.main(Command.java:88)\r\n\tat org.elasticsearch.plugins.PluginCli.main(PluginCli.java:47)\r\n```\r\n\r\nMeanwhile, these use cases all fail with a good error message:\r\n`elasticsearch-5.2.0/bin/elasticsearch-plugin install`\r\n`elasticsearch-5.2.0/bin/elasticsearch-plugin remove does-not-exist`\r\n`elasticsearch-5.2.0/bin/elasticsearch-plugin invalid-action`", "comments": [], "number": 22922, "title": "elasticsearch-plugin remove without plugin name fails ungracefully" }
{ "body": "Hi,\r\n\r\nAdded check for null plugin name passed to removePlugin.\r\n\r\nAlso renamed pluginId to pluginName in install for consistency with docs.\r\nhttps://www.elastic.co/guide/en/elasticsearch/plugins/current/listing-removing.html\r\n\r\nAppears to fix issue when I recreated the distribution. Let me know your thoughts.\r\n\r\nCloses #22922 ", "number": 22930, "review_comments": [ { "body": "Please revert the name change. It's fine as is, and the name change here isn't thorough enough (there are methods invoked here that still have the parameter named `pluginId`).", "created_at": "2017-02-02T14:22:46Z" }, { "body": "Just the variable name correct?", "created_at": "2017-02-02T15:19:00Z" }, { "body": "No, it should stay \"id\" in the message because plugins are installed by id (with the exception of some special plugins that can be installed by name only). Yet \"name\" is fine for removal because plugins are removed by name.", "created_at": "2017-02-02T15:30:47Z" }, { "body": "It is called pluginId in install because it is an identifier, which _may_ be a plugin name, but it also may be maven coordinates or a url.", "created_at": "2017-02-02T17:54:59Z" }, { "body": "Ok my mistake, originally saw https://www.elastic.co/guide/en/elasticsearch/plugins/current/installation.html for install and thought 'name' was clearer for the output message.\r\n\r\nShould these commits be squashed?", "created_at": "2017-02-02T19:03:24Z" }, { "body": "No need to squash, we can do it on merge.", "created_at": "2017-02-02T19:34:20Z" } ], "title": "Add check for null pluginName in remove command" }
{ "commits": [ { "message": "Add check for null pluginName in remove command" }, { "message": "change pluginName to pluginId in InstallPluginCommand" }, { "message": "Change plugin name to plugin id in install message" } ], "files": [ { "diff": "@@ -57,6 +57,10 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th\n \n // pkg private for testing\n void execute(Terminal terminal, String pluginName, Environment env) throws Exception {\n+ if (pluginName == null) {\n+ throw new UserException(ExitCodes.USAGE, \"plugin name is required\");\n+ }\n+\n terminal.println(\"-> Removing \" + Strings.coalesceToEmpty(pluginName) + \"...\");\n \n final Path pluginDir = env.pluginsFile().resolve(pluginName);", "filename": "core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java", "status": "modified" }, { "diff": "@@ -153,6 +153,12 @@ protected boolean addShutdownHook() {\n }\n }\n \n+ public void testMissingPluginName() throws Exception {\n+ UserException e = expectThrows(UserException.class, () -> removePlugin(null, home));\n+ assertEquals(ExitCodes.USAGE, e.exitCode);\n+ assertEquals(\"plugin name is required\", e.getMessage());\n+ }\n+\n private String expectedConfigDirPreservedMessage(final Path configDir) {\n return \"-> Preserving plugin config files [\" + configDir + \"] in case of upgrade, delete manually if not needed\";\n }", "filename": "qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java", "status": "modified" } ] }
{ "body": "When dynamically updating mappings, these fields can cause the document parser to throw an NPE since the parent mapper doesn't exist for the line:\r\n\r\n```\r\nObjectMapper withNewMapper = parentMappers.get(lastIndex).mappingUpdate(mapper);\r\n```\r\n\r\nCausing this exception:\r\n\r\n```\r\nCaused by: java.lang.NullPointerException\r\n\tat org.elasticsearch.index.mapper.DocumentParser.createUpdate(DocumentParser.java:309) ~[elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.index.mapper.DocumentParser.createDynamicUpdate(DocumentParser.java:190) ~[elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.index.mapper.DocumentParser.parseDocument(DocumentParser.java:78) ~[elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.index.mapper.DocumentMapper.parse(DocumentMapper.java:275) ~[elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.index.shard.IndexShard.prepareIndex(IndexShard.java:533) ~[elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.index.shard.IndexShard.prepareIndexOnPrimary(IndexShard.java:510) ~[elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.action.index.TransportIndexAction.prepareIndexOperationOnPrimary(TransportIndexAction.java:174) ~[elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.action.index.TransportIndexAction.executeIndexRequestOnPrimary(TransportIndexAction.java:179) ~[elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.action.index.TransportIndexAction.onPrimaryShard(TransportIndexAction.java:144) ~[elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.action.index.TransportIndexAction.onPrimaryShard(TransportIndexAction.java:63) ~[elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.action.support.replication.TransportWriteAction.shardOperationOnPrimary(TransportWriteAction.java:75) ~[elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.action.support.replication.TransportWriteAction.shardOperationOnPrimary(TransportWriteAction.java:48) ~[elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryShardReference.perform(TransportReplicationAction.java:905) ~[elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryShardReference.perform(TransportReplicationAction.java:875) ~[elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.action.support.replication.ReplicationOperation.execute(ReplicationOperation.java:113) ~[elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.onResponse(TransportReplicationAction.java:323) [elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.onResponse(TransportReplicationAction.java:258) [elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.action.support.replication.TransportReplicationAction$1.onResponse(TransportReplicationAction.java:855) [elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.action.support.replication.TransportReplicationAction$1.onResponse(TransportReplicationAction.java:852) [elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.index.shard.IndexShardOperationsLock.acquire(IndexShardOperationsLock.java:142) [elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.index.shard.IndexShard.acquirePrimaryOperationLock(IndexShard.java:1648) [elasticsearch-5.1.2.jar:5.1.2]\r\n\tat 
org.elasticsearch.action.support.replication.TransportReplicationAction.acquirePrimaryShardReference(TransportReplicationAction.java:864) [elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.action.support.replication.TransportReplicationAction.access$400(TransportReplicationAction.java:90) [elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.doRun(TransportReplicationAction.java:275) [elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryOperationTransportHandler.messageReceived(TransportReplicationAction.java:254) [elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryOperationTransportHandler.messageReceived(TransportReplicationAction.java:246) [elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:69) [elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:577) [elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:527) [elasticsearch-5.1.2.jar:5.1.2]\r\n\tat org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-5.1.2.jar:5.1.2]\r\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_111]\r\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_111]\r\n\tat java.lang.Thread.run(Thread.java:745) [?:1.8.0_111]\r\n```\r\n\r\nWhich can be reproduced with:\r\n\r\n```\r\nDELETE /i\r\n\r\nPOST /i/d?pretty\r\n{\r\n \"top.\": [\r\n {\r\n \"foo.\": [{\r\n \"thing\": \"bah\"\r\n }]\r\n }\r\n ]\r\n}\r\n\r\nPOST /i/d?pretty\r\n{\r\n \"top.\": [\r\n {\r\n \"foo.\": [{\r\n \"bar.\": {\r\n \"aoeu.\": {\r\n \"a\": 1,\r\n \"b\": 2\r\n },\r\n \"baz\": \"eggplant\"\r\n }\r\n }]\r\n }\r\n ]\r\n}\r\n```", "comments": [ { "body": "This should be all fields, not just dynamic fields.", "created_at": "2017-01-25T23:15:26Z" }, { "body": "Thanks @rjernst, I updated the title", "created_at": "2017-01-25T23:17:48Z" }, { "body": "Another use case;\r\n\r\n```\r\ncurl -XPOST localhost:9200/test-index/testType -d '{\r\n \"top. \": [\r\n {\r\n \"foo.foo\": [{\r\n \"thing\": \"bah\",\r\n \"bar.\": \"barr\"\r\n }]\r\n }\r\n ]\r\n}'\r\n\r\ncurl -XPOST localhost:9200/test-index/testType -d '{\r\n \"top. \": [\r\n {\r\n \"foo.foo\": [{\r\n \"bar.bar\": {\r\n \"aoeu.aeiou\": {\r\n \"a\": 1,\r\n \"b\": 2\r\n },\r\n \"baz\": \"eggplant\"\r\n }\r\n }]\r\n }\r\n ]\r\n}'\r\n```\r\n\r\n**Error Message**\r\n\r\n`{\"error\":{\"root_cause\":[{\"type\":\"mapper_parsing_exception\",\"reason\":\"Could not dynamically add mapping for field [bar.bar]. Existing mapping for [top. .foo.foo.bar] must be of type object but found [text].\"}],\"type\":\"mapper_parsing_exception\",\"reason\":\"Could not dynamically add mapping for field [bar.bar]. Existing mapping for [top. .foo.foo.bar] must be of type object but found [text].\"},\"status\":400}`", "created_at": "2017-01-26T17:56:57Z" }, { "body": "Another use case; \r\n\r\n```\r\ncurl -XPOST localhost:9200/test-index/testType -d '{\r\n \"top. 
\": [\r\n {\r\n \"foo.foo\": [{\r\n \"thing\": \"bah\",\r\n \"bar.\": \"barr\"\r\n }],\r\n \"foo\": \"1\"\r\n }\r\n ]\r\n}'\r\n\r\n```\r\n\r\n`{\"error\":{\"root_cause\":[{\"type\":\"remote_transport_exception\",\"reason\":\"[localhost][127.0.0.1:9300][indices:data/write/index]\"}],\"type\":\"illegal_argument_exception\",\"reason\":\"mapper [top. .foo] of different type, current_type [text], merged_type [ObjectMapper]\"},\"status\":400}`", "created_at": "2017-01-26T18:00:30Z" } ], "number": 22794, "title": "Reject fields that start with or end with a `.`" }
{ "body": "This disallows object mappings that would accidentally create something like\r\n`foo..bar`, which is then unparsable for the `bar` field as it does not know\r\nwhat its parent is.\r\n\r\nResolves #22794", "number": 22891, "review_comments": [ { "body": "what about splitting first and then checking whether the array contains an empty string? I think that would also cover two other cases we are interested in validating: leading dots and trailing dots? (just thinking out loud)", "created_at": "2017-01-31T21:47:12Z" }, { "body": "can you reduce the visibility of this method?", "created_at": "2017-01-31T21:47:32Z" } ], "title": "Disallow introducing illegal object mappings (double '..')" }
{ "commits": [ { "message": "Disallow introducing illegal object mappings (double '..')\n\nThis disallows object mappings that would accidentally create something like\n`foo..bar`, which is then unparsable for the `bar` field as it does not know\nwhat its parent is.\n\nResolves #22794" } ], "files": [ { "diff": "@@ -22,6 +22,7 @@\n import org.apache.lucene.document.Field;\n import org.apache.lucene.index.IndexableField;\n import org.elasticsearch.Version;\n+import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.collect.Tuple;\n import org.elasticsearch.common.joda.FormatDateTimeFormatter;\n import org.elasticsearch.common.xcontent.XContentHelper;\n@@ -172,6 +173,17 @@ private static MapperParsingException wrapInMapperParsingException(SourceToParse\n return new MapperParsingException(\"failed to parse\", e);\n }\n \n+ private static String[] splitAndValidatePath(String fullFieldPath) {\n+ String[] parts = fullFieldPath.split(\"\\\\.\");\n+ for (String part : parts) {\n+ if (Strings.hasText(part) == false) {\n+ throw new IllegalArgumentException(\n+ \"object field starting or ending with a [.] makes object resolution ambiguous: [\" + fullFieldPath + \"]\");\n+ }\n+ }\n+ return parts;\n+ }\n+\n /** Creates a Mapping containing any dynamically added fields, or returns null if there were no dynamic mappings. */\n static Mapping createDynamicUpdate(Mapping mapping, DocumentMapper docMapper, List<Mapper> dynamicMappers) {\n if (dynamicMappers.isEmpty()) {\n@@ -184,7 +196,7 @@ static Mapping createDynamicUpdate(Mapping mapping, DocumentMapper docMapper, Li\n Iterator<Mapper> dynamicMapperItr = dynamicMappers.iterator();\n List<ObjectMapper> parentMappers = new ArrayList<>();\n Mapper firstUpdate = dynamicMapperItr.next();\n- parentMappers.add(createUpdate(mapping.root(), firstUpdate.name().split(\"\\\\.\"), 0, firstUpdate));\n+ parentMappers.add(createUpdate(mapping.root(), splitAndValidatePath(firstUpdate.name()), 0, firstUpdate));\n Mapper previousMapper = null;\n while (dynamicMapperItr.hasNext()) {\n Mapper newMapper = dynamicMapperItr.next();\n@@ -196,7 +208,7 @@ static Mapping createDynamicUpdate(Mapping mapping, DocumentMapper docMapper, Li\n continue;\n }\n previousMapper = newMapper;\n- String[] nameParts = newMapper.name().split(\"\\\\.\");\n+ String[] nameParts = splitAndValidatePath(newMapper.name());\n \n // We first need the stack to only contain mappers in common with the previously processed mapper\n // For example, if the first mapper processed was a.b.c, and we now have a.d, the stack will contain\n@@ -453,7 +465,7 @@ private static ObjectMapper parseObject(final ParseContext context, ObjectMapper\n context.path().remove();\n } else {\n \n- final String[] paths = currentFieldName.split(\"\\\\.\");\n+ final String[] paths = splitAndValidatePath(currentFieldName);\n currentFieldName = paths[paths.length - 1];\n Tuple<Integer, ObjectMapper> parentMapperTuple = getDynamicParentMapper(context, paths, mapper);\n ObjectMapper parentMapper = parentMapperTuple.v2();\n@@ -497,7 +509,7 @@ private static void parseArray(ParseContext context, ObjectMapper parentMapper,\n }\n } else {\n \n- final String[] paths = arrayFieldName.split(\"\\\\.\");\n+ final String[] paths = splitAndValidatePath(arrayFieldName);\n arrayFieldName = paths[paths.length - 1];\n lastFieldName = arrayFieldName;\n Tuple<Integer, ObjectMapper> parentMapperTuple = getDynamicParentMapper(context, paths, parentMapper);\n@@ -561,7 +573,7 @@ private static void parseValue(final ParseContext context, 
ObjectMapper parentMa\n parseObjectOrField(context, mapper);\n } else {\n \n- final String[] paths = currentFieldName.split(\"\\\\.\");\n+ final String[] paths = splitAndValidatePath(currentFieldName);\n currentFieldName = paths[paths.length - 1];\n Tuple<Integer, ObjectMapper> parentMapperTuple = getDynamicParentMapper(context, paths, parentMapper);\n parentMapper = parentMapperTuple.v2();\n@@ -813,7 +825,7 @@ private static void parseCopy(String field, ParseContext context) throws IOExcep\n // The path of the dest field might be completely different from the current one so we need to reset it\n context = context.overridePath(new ContentPath(0));\n \n- final String[] paths = field.split(\"\\\\.\");\n+ final String[] paths = splitAndValidatePath(field);\n final String fieldName = paths[paths.length-1];\n Tuple<Integer, ObjectMapper> parentMapperTuple = getDynamicParentMapper(context, paths, null);\n ObjectMapper mapper = parentMapperTuple.v2();\n@@ -897,7 +909,7 @@ private static ObjectMapper.Dynamic dynamicOrDefault(ObjectMapper parentMapper,\n \n // looks up a child mapper, but takes into account field names that expand to objects\n static Mapper getMapper(ObjectMapper objectMapper, String fieldName) {\n- String[] subfields = fieldName.split(\"\\\\.\");\n+ String[] subfields = splitAndValidatePath(fieldName);\n for (int i = 0; i < subfields.length - 1; ++i) {\n Mapper mapper = objectMapper.getMapper(subfields[i]);\n if (mapper == null || (mapper instanceof ObjectMapper) == false) {", "filename": "core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java", "status": "modified" }, { "diff": "@@ -47,6 +47,7 @@\n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;\n import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;\n+import static org.hamcrest.Matchers.containsString;\n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.instanceOf;\n import static org.hamcrest.Matchers.not;\n@@ -1262,4 +1263,36 @@ public void testDynamicDateDetectionEnabledWithNoSpecialCharacters() throws IOEx\n assertNotNull(dateMapper);\n assertThat(dateMapper, instanceOf(DateFieldMapper.class));\n }\n+\n+ public void testDynamicFieldsStartingAndEndingWithDot() throws Exception {\n+ BytesReference bytes = XContentFactory.jsonBuilder().startObject().startArray(\"top.\")\n+ .startObject().startArray(\"foo.\")\n+ .startObject()\n+ .field(\"thing\", \"bah\")\n+ .endObject().endArray()\n+ .endObject().endArray()\n+ .endObject().bytes();\n+\n+ client().prepareIndex(\"idx\", \"type\").setSource(bytes).get();\n+\n+ bytes = XContentFactory.jsonBuilder().startObject().startArray(\"top.\")\n+ .startObject().startArray(\"foo.\")\n+ .startObject()\n+ .startObject(\"bar.\")\n+ .startObject(\"aoeu\")\n+ .field(\"a\", 1).field(\"b\", 2)\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .endArray().endObject().endArray()\n+ .endObject().bytes();\n+\n+ try {\n+ client().prepareIndex(\"idx\", \"type\").setSource(bytes).get();\n+ fail(\"should have failed to dynamically introduce a double-dot field\");\n+ } catch (IllegalArgumentException e) {\n+ assertThat(e.getMessage(),\n+ containsString(\"object field starting or ending with a [.] 
makes object resolution ambiguous: [top..foo..bar]\"));\n+ }\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java", "status": "modified" }, { "diff": "@@ -257,21 +257,7 @@ public void testDocumentWithBlankFieldName() {\n }\n );\n assertThat(e.getMessage(), containsString(\"failed to parse\"));\n- assertThat(e.getRootCause().getMessage(), containsString(\"name cannot be empty string\"));\n- }\n-\n- @Override\n- protected Collection<Class<? extends Plugin>> nodePlugins() {\n- return Collections.singleton(InternalSettingsPlugin.class); // uses index.version.created\n- }\n-\n- public void testDocumentWithBlankFieldName2x() {\n- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_4);\n- Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();\n- assertAcked(prepareCreate(\"test1\").setSettings(settings));\n- ensureGreen();\n-\n- IndexResponse indexResponse = client().prepareIndex(\"test1\", \"type\", \"1\").setSource(\"\", \"value1_2\").execute().actionGet();\n- assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());\n+ assertThat(e.getRootCause().getMessage(),\n+ containsString(\"object field starting or ending with a [.] makes object resolution ambiguous: []\"));\n }\n }", "filename": "core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java", "status": "modified" } ] }
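For quick reference, the validation this PR introduces boils down to the standalone sketch below. It is not the actual Elasticsearch class: `PathValidationSketch` is an illustrative name and `part.trim().isEmpty()` stands in for the `Strings.hasText(part) == false` check used in `DocumentParser`.

```java
// Standalone sketch of the splitAndValidatePath idea: split a field name on '.' and
// reject any empty segment, since a path like "foo..bar" leaves the parent of "bar" ambiguous.
public final class PathValidationSketch {

    static String[] splitAndValidatePath(String fullFieldPath) {
        String[] parts = fullFieldPath.split("\\.");
        for (String part : parts) {
            if (part.trim().isEmpty()) { // stand-in for Strings.hasText(part) == false
                throw new IllegalArgumentException(
                    "object field starting or ending with a [.] makes object resolution ambiguous: ["
                        + fullFieldPath + "]");
            }
        }
        return parts;
    }

    public static void main(String[] args) {
        System.out.println(String.join("/", splitAndValidatePath("top.foo.bar"))); // top/foo/bar
        try {
            splitAndValidatePath("top..foo..bar");
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // mirrors the message asserted in DocumentParserTests
        }
    }
}
```

Note that Java's `String.split` drops trailing empty strings, which is why a name with only a trailing dot (like the `top.` and `foo.` fields in the first test document) still indexes, while `top..foo..bar` and the blank field name both trip the check.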
{ "body": "The packaging tests fail with:\r\n\r\n```\r\nJan 30 12:43:18 ubuntu elasticsearch[5661]: Exception in thread \"main\" org.elasticsearch.bootstrap.BootstrapException: java.nio.file.NoSuchFileException\r\nJan 30 12:43:18 ubuntu elasticsearch[5661]: Likely root cause: java.nio.file.NoSuchFileException: /usr/share/elasticsearch/config\r\nJan 30 12:43:18 ubuntu elasticsearch[5661]: at sun.nio.fs.UnixException.translateToIOException(UnixException.java:86)\r\nJan 30 12:43:18 ubuntu elasticsearch[5661]: at sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:102)\r\nJan 30 12:43:18 ubuntu elasticsearch[5661]: at sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:107)\r\nJan 30 12:43:18 ubuntu elasticsearch[5661]: at sun.nio.fs.UnixFileAttributeViews$Basic.readAttributes(UnixFileAttributeViews.java:55)\r\nJan 30 12:43:18 ubuntu elasticsearch[5661]: at sun.nio.fs.UnixFileSystemProvider.readAttributes(UnixFileSystemProvider.java:144)\r\nJan 30 12:43:18 ubuntu elasticsearch[5661]: at sun.nio.fs.LinuxFileSystemProvider.readAttributes(LinuxFileSystemProvider.java:99)\r\nJan 30 12:43:18 ubuntu elasticsearch[5661]: at java.nio.file.Files.readAttributes(Files.java:1737)\r\nJan 30 12:43:18 ubuntu elasticsearch[5661]: at java.nio.file.FileTreeWalker.getAttributes(FileTreeWalker.java:225)\r\nJan 30 12:43:18 ubuntu elasticsearch[5661]: at java.nio.file.FileTreeWalker.visit(FileTreeWalker.java:276)\r\nJan 30 12:43:18 ubuntu elasticsearch[5661]: at java.nio.file.FileTreeWalker.walk(FileTreeWalker.java:322)\r\nJan 30 12:43:18 ubuntu elasticsearch[5661]: at java.nio.file.Files.walkFileTree(Files.java:2662)\r\nJan 30 12:43:18 ubuntu elasticsearch[5661]: at org.elasticsearch.common.logging.LogConfigurator.configure(LogConfigurator.java:99)\r\nJan 30 12:43:18 ubuntu elasticsearch[5661]: at org.elasticsearch.common.logging.LogConfigurator.configure(LogConfigurator.java:82)\r\nJan 30 12:43:18 ubuntu elasticsearch[5661]: at org.elasticsearch.bootstrap.Bootstrap.init(Bootstrap.java:305)\r\n```\r\n\r\nThe config directory is in `/etc/elasticsearch` but Elasticsearch picks up `/usr/share/elasticsearch/config`.\r\n\r\nThe problem has been introduced in https://github.com/elastic/elasticsearch/commit/aad51d44ab7363e53da773afa4707e0165eebda0#diff-192de7116527350709fbca18fe4d2087\r\n\r\nExample build: https://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+master+packaging-tests/898/console\r\nReproduction (on master): `gradle -Pvagrant.boxes=debian-8 packagingTest`", "comments": [ { "body": "@rjernst Could you please have a look?", "created_at": "2017-01-30T15:15:39Z" } ], "number": 22861, "title": "Wrong config directory is picked up" }
{ "body": "In #22762, settings preparation during bootstrap was changed slightly to\r\naccount for SecureSettings, by starting with a fresh settings builder\r\nafter reading the initial configuration. However, this the defaults from\r\nsystem properties were never re-read. This change fixes that bug (which\r\nwas never released).\r\n\r\ncloses #22861", "number": 22871, "review_comments": [], "title": "Settings: Fix settings reading to account for defaults" }
{ "commits": [ { "message": "Settings: Fix settings reading to account for defaults\n\nIn #22762, settings preparation during bootstrap was changed slightly to\naccount for SecureSettings, by starting with a fresh settings builder\nafter reading the initial configuration. However, this the defaults from\nsystem properties were never re-read. This change fixes that bug (which\nwas never released).\n\ncloses #22861" } ], "files": [ { "diff": "@@ -30,6 +30,7 @@\n import java.util.Set;\n import java.util.function.Function;\n import java.util.function.Predicate;\n+import java.util.function.UnaryOperator;\n \n import org.elasticsearch.cli.Terminal;\n import org.elasticsearch.cluster.ClusterName;\n@@ -44,8 +45,9 @@\n public class InternalSettingsPreparer {\n \n private static final String[] ALLOWED_SUFFIXES = {\".yml\", \".yaml\", \".json\"};\n- static final String PROPERTY_DEFAULTS_PREFIX = \"default.\";\n- static final Predicate<String> PROPERTY_DEFAULTS_PREDICATE = key -> key.startsWith(PROPERTY_DEFAULTS_PREFIX);\n+ private static final String PROPERTY_DEFAULTS_PREFIX = \"default.\";\n+ private static final Predicate<String> PROPERTY_DEFAULTS_PREDICATE = key -> key.startsWith(PROPERTY_DEFAULTS_PREFIX);\n+ private static final UnaryOperator<String> STRIP_PROPERTY_DEFAULTS_PREFIX = key -> key.substring(PROPERTY_DEFAULTS_PREFIX.length());\n \n public static final String SECRET_PROMPT_VALUE = \"${prompt.secret}\";\n public static final String TEXT_PROMPT_VALUE = \"${prompt.text}\";\n@@ -55,7 +57,7 @@ public class InternalSettingsPreparer {\n */\n public static Settings prepareSettings(Settings input) {\n Settings.Builder output = Settings.builder();\n- initializeSettings(output, input, true, Collections.emptyMap());\n+ initializeSettings(output, input, Collections.emptyMap());\n finalizeSettings(output, null);\n return output.build();\n }\n@@ -86,7 +88,7 @@ public static Environment prepareEnvironment(Settings input, Terminal terminal)\n public static Environment prepareEnvironment(Settings input, Terminal terminal, Map<String, String> properties) {\n // just create enough settings to build the environment, to get the config dir\n Settings.Builder output = Settings.builder();\n- initializeSettings(output, input, true, properties);\n+ initializeSettings(output, input, properties);\n Environment environment = new Environment(output.build());\n \n output = Settings.builder(); // start with a fresh output\n@@ -112,8 +114,7 @@ public static Environment prepareEnvironment(Settings input, Terminal terminal,\n }\n \n // re-initialize settings now that the config file has been loaded\n- // TODO: only re-initialize if a config file was actually loaded\n- initializeSettings(output, input, false, properties);\n+ initializeSettings(output, input, properties);\n finalizeSettings(output, terminal);\n \n environment = new Environment(output.build());\n@@ -127,11 +128,11 @@ public static Environment prepareEnvironment(Settings input, Terminal terminal,\n * Initializes the builder with the given input settings, and loads system properties settings if allowed.\n * If loadDefaults is true, system property default settings are loaded.\n */\n- private static void initializeSettings(Settings.Builder output, Settings input, boolean loadDefaults, Map<String, String> esSettings) {\n+ private static void initializeSettings(Settings.Builder output, Settings input, Map<String, String> esSettings) {\n output.put(input);\n- if (loadDefaults) {\n- output.putProperties(esSettings, PROPERTY_DEFAULTS_PREDICATE, key -> 
key.substring(PROPERTY_DEFAULTS_PREFIX.length()));\n- }\n+ output.putProperties(esSettings,\n+ PROPERTY_DEFAULTS_PREDICATE.and(key -> output.get(STRIP_PROPERTY_DEFAULTS_PREFIX.apply(key)) == null),\n+ STRIP_PROPERTY_DEFAULTS_PREFIX);\n output.putProperties(esSettings, PROPERTY_DEFAULTS_PREDICATE.negate(), Function.identity());\n output.replacePropertyPlaceholders();\n }", "filename": "core/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java", "status": "modified" }, { "diff": "@@ -35,25 +35,32 @@\n \n import java.io.IOException;\n import java.io.InputStream;\n+import java.nio.charset.StandardCharsets;\n import java.nio.file.Files;\n import java.nio.file.Path;\n+import java.util.Arrays;\n+import java.util.Collections;\n+import java.util.Map;\n \n import static org.hamcrest.Matchers.containsString;\n import static org.hamcrest.Matchers.equalTo;\n \n public class InternalSettingsPreparerTests extends ESTestCase {\n \n+ Path homeDir;\n Settings baseEnvSettings;\n \n @Before\n public void createBaseEnvSettings() {\n+ homeDir = createTempDir();\n baseEnvSettings = Settings.builder()\n- .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())\n+ .put(Environment.PATH_HOME_SETTING.getKey(), homeDir)\n .build();\n }\n \n @After\n public void clearBaseEnvSettings() {\n+ homeDir = null;\n baseEnvSettings = null;\n }\n \n@@ -174,4 +181,19 @@ public void testSecureSettings() {\n Setting<SecureString> fakeSetting = SecureSetting.secureString(\"foo\", null, false);\n assertEquals(\"secret\", fakeSetting.get(env.settings()).toString());\n }\n+\n+ public void testDefaultProperties() throws Exception {\n+ Map<String, String> props = Collections.singletonMap(\"default.setting\", \"foo\");\n+ Environment env = InternalSettingsPreparer.prepareEnvironment(baseEnvSettings, null, props);\n+ assertEquals(\"foo\", env.settings().get(\"setting\"));\n+ }\n+\n+ public void testDefaultPropertiesOverride() throws Exception {\n+ Path configDir = homeDir.resolve(\"config\");\n+ Files.createDirectories(configDir);\n+ Files.write(configDir.resolve(\"elasticsearch.yml\"), Collections.singletonList(\"setting: bar\"), StandardCharsets.UTF_8);\n+ Map<String, String> props = Collections.singletonMap(\"default.setting\", \"foo\");\n+ Environment env = InternalSettingsPreparer.prepareEnvironment(baseEnvSettings, null, props);\n+ assertEquals(\"bar\", env.settings().get(\"setting\"));\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java", "status": "modified" } ] }
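The behaviour the fix restores can be illustrated with a plain-Java model (hypothetical class and method names, an ordinary `Map` standing in for `Settings.Builder`): a `default.`-prefixed property only supplies a value when the stripped key is not already configured, while non-prefixed properties are applied as-is.

```java
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

// Simplified model of initializeSettings: defaults fill gaps but never override
// values that already came from elasticsearch.yml (or elsewhere).
public final class DefaultSettingsSketch {

    static Map<String, String> applyProperties(Map<String, String> configured, Map<String, String> esSettings) {
        final String prefix = "default.";
        Map<String, String> output = new HashMap<>(configured);
        // default.* keys: strip the prefix, but only apply if the key is still unset
        esSettings.forEach((key, value) -> {
            if (key.startsWith(prefix)) {
                output.putIfAbsent(key.substring(prefix.length()), value);
            }
        });
        // plain keys: applied as-is, mirroring the unchanged second putProperties call
        esSettings.forEach((key, value) -> {
            if (!key.startsWith(prefix)) {
                output.put(key, value);
            }
        });
        return output;
    }

    public static void main(String[] args) {
        Map<String, String> props = Collections.singletonMap("default.setting", "foo");
        // nothing configured -> the default wins (as in testDefaultProperties)
        System.out.println(applyProperties(Collections.emptyMap(), props).get("setting")); // foo
        // elasticsearch.yml sets "setting: bar" -> the default is ignored (as in testDefaultPropertiesOverride)
        System.out.println(applyProperties(Collections.singletonMap("setting", "bar"), props).get("setting")); // bar
    }
}
```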
{ "body": "The `index.store.preload` setting, which can be set at node level or index level, is not visible in the default settings for either the cluster or the index:\r\n\r\n PUT my_index\r\n \r\n GET my_index/_settings?include_defaults&flat_settings\r\n GET _cluster/settings?include_defaults&flat_settings\r\n", "comments": [ { "body": "Hi @clintongormley, i assume that `index.store.preload` is a module setting and is created when the first node is up. See [here](https://github.com/elastic/elasticsearch/blob/5.0/core/src/main/java/org/elasticsearch/index/IndexModule.java#L82). But [here](https://github.com/elastic/elasticsearch/blob/5.0/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java#L74) this setting is never added to the `keySettings` because `hasComplexMatcher` returned true for `index.store.preload`. What this `complexMatcher` exactly stands for?", "created_at": "2017-01-25T15:33:30Z" }, { "body": "Hey guys,\r\n\r\nI would like to work on this if possible :)", "created_at": "2017-03-13T04:06:16Z" }, { "body": "@clintongormley I think this could be closed. Both above two APIs can get the setting now. ", "created_at": "2017-12-16T09:27:34Z" }, { "body": "thanks @liketic - closing", "created_at": "2017-12-18T09:39:36Z" } ], "number": 22686, "title": "`index.store.preload` not present in default index settings or cluster settings" }
{ "body": "Fix #22686 \r\n\r\nThis PR not only fix #22686 but all settings that contains an empty array.", "number": 22850, "review_comments": [], "title": "Fix empty array setting" }
{ "commits": [ { "message": "fix empty array setting" }, { "message": "Merge branch 'master' into fix-empty-array" } ], "files": [ { "diff": "@@ -890,9 +890,17 @@ public Builder putArray(String setting, List<String> values) {\n break;\n }\n }\n- for (int i = 0; i < values.size(); i++) {\n- put(setting + \".\" + i, values.get(i));\n+\n+ if(values.size() <= 0)\n+ {\n+ put(setting, \"[]\");\n+ }\n+ else{\n+ for (int i = 0; i < values.size(); i++) {\n+ put(setting + \".\" + i, values.get(i));\n+ }\n }\n+\n return this;\n }\n ", "filename": "core/src/main/java/org/elasticsearch/common/settings/Settings.java", "status": "modified" }, { "diff": "@@ -28,12 +28,7 @@\n import org.hamcrest.Matchers;\n \n import java.io.IOException;\n-import java.util.ArrayList;\n-import java.util.Iterator;\n-import java.util.List;\n-import java.util.Map;\n-import java.util.NoSuchElementException;\n-import java.util.Set;\n+import java.util.*;\n \n import static org.hamcrest.Matchers.allOf;\n import static org.hamcrest.Matchers.arrayContaining;\n@@ -550,9 +545,16 @@ public void testEmptyFilterMap() {\n expectThrows(NoSuchElementException.class, () -> iterator.next());\n }\n \n+ public void testEmptyArraySetting(){\n+ Settings settings = Settings.builder().putArray(\"foo.bar\", Collections.emptyList()).build();\n+\n+ assertThat(settings.get(\"foo.bar\"), equalTo(\"[]\"));\n+ }\n+ \n public void testEmpty() {\n assertTrue(Settings.EMPTY.isEmpty());\n MockSecureSettings secureSettings = new MockSecureSettings();\n assertTrue(Settings.builder().setSecureSettings(secureSettings).build().isEmpty());\n+\n }\n }", "filename": "core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java", "status": "modified" } ] }
{ "body": "The request cache currently does not check to see whether the search timed out on the shard before it adds the result to the cache. We should check `QueryShardResult.searchTimedOut()` and only cache the result if the search did not time out.", "comments": [ { "body": "sounds good\r\n", "created_at": "2017-01-25T16:08:22Z" } ], "number": 22789, "title": "Request cache should not cache timed out searches" }
{ "body": "Today we cache query results even if the query timed out. This is obviously\r\nproblematic since results are not complete. Yet, the decision if a query timed\r\nout or not happens too late to simply not cache the result since if we'd just throw\r\nan exception all currently waiting requests with the same request / cache key would\r\nfail with the same exception without the option to access the result or to re-execute.\r\nInstead, this change will allow the request to enter the cache but invalidates it immediately.\r\nConcurrent request might not get executed and return the timed out result which is not absolutely\r\ncorrect but very likely since identical requests will likely timeout as well. As a side-effect\r\nwe won't hammer the node with concurrent slow searches but rather only execute one of them\r\nand return shortly cached result.\r\n\r\nCloses #22789", "number": 22807, "review_comments": [], "title": "Invalidate cached query results if query timed out" }
{ "commits": [ { "message": "Invalidate cached query results if query timed out\n\nToday we cache query results even if the query timed out. This is obviously\nproblematic since results are not complete. Yet, the decision if a query timed\nout or not happens too late to simply not cache the result since if we'd just throw\nan exception all currently waiting requests with the same request / cache key would\nfail with the same exception without the option to access the result or to reexecute.\nInstead, this change will allow the request to enter the cache but invalidates it immediately.\nConcurrent request might not get executed and return the timed out result which is not absolutely\ncorrect but very likely since idendical requests will likey timeout as well. As a side-effect\nwe won't hammer the node with concurrent slow searches but rather only execute one of them\nand return shortly cached result.\n\nCloses #22789" } ], "files": [ { "diff": "@@ -130,6 +130,16 @@ BytesReference getOrCompute(CacheEntity cacheEntity, Supplier<BytesReference> lo\n return value;\n }\n \n+ /**\n+ * Invalidates the given the cache entry for the given key and it's context\n+ * @param cacheEntity the cache entity to invalidate for\n+ * @param reader the reader to invalidate the cache entry for\n+ * @param cacheKey the cache key to invalidate\n+ */\n+ void invalidate(CacheEntity cacheEntity, DirectoryReader reader, BytesReference cacheKey) {\n+ cache.invalidate(new Key(cacheEntity, reader.getVersion(), cacheKey));\n+ }\n+\n private static class Loader implements CacheLoader<Key, BytesReference> {\n \n private final CacheEntity entity;", "filename": "core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java", "status": "modified" }, { "diff": "@@ -1134,17 +1134,28 @@ public void loadIntoContext(ShardSearchRequest request, SearchContext context, Q\n queryPhase.execute(context);\n try {\n context.queryResult().writeToNoId(out);\n+\n } catch (IOException e) {\n throw new AssertionError(\"Could not serialize response\", e);\n }\n loadedFromCache[0] = false;\n });\n+\n if (loadedFromCache[0]) {\n // restore the cached query result into the context\n final QuerySearchResult result = context.queryResult();\n StreamInput in = new NamedWriteableAwareStreamInput(bytesReference.streamInput(), namedWriteableRegistry);\n result.readFromWithId(context.id(), in);\n result.shardTarget(context.shardTarget());\n+ } else if (context.queryResult().searchTimedOut()) {\n+ // we have to invalidate the cache entry if we cached a query result form a request that timed out.\n+ // we can't really throw exceptions in the loading part to signal a timed out search to the outside world since if there are\n+ // multiple requests that wait for the cache entry to be calculated they'd fail all with the same exception.\n+ // instead we all caching such a result for the time being, return the timed out result for all other searches with that cache\n+ // key invalidate the result in the thread that caused the timeout. This will end up to be simpler and eventually correct since\n+ // running a search that times out concurrently will likely timeout again if it's run while we have this `stale` result in the\n+ // cache. 
One other option is to not cache requests with a timeout at all...\n+ indicesRequestCache.invalidate(new IndexShardCacheEntity(context.indexShard()), directoryReader, request.cacheKey());\n }\n }\n ", "filename": "core/src/main/java/org/elasticsearch/indices/IndicesService.java", "status": "modified" }, { "diff": "@@ -169,7 +169,6 @@ public void testCacheDifferentReaders() throws Exception {\n assertEquals(2, requestCacheStats.stats().getMissCount());\n assertEquals(0, requestCacheStats.stats().getEvictions());\n assertTrue(loader.loadedFromCache);\n- assertTrue(loader.loadedFromCache);\n assertEquals(1, cache.count());\n assertEquals(cacheSize, requestCacheStats.stats().getMemorySize().bytesAsInt());\n assertEquals(1, cache.numRegisteredCloseListeners());\n@@ -186,7 +185,6 @@ public void testCacheDifferentReaders() throws Exception {\n assertEquals(2, requestCacheStats.stats().getMissCount());\n assertEquals(0, requestCacheStats.stats().getEvictions());\n assertTrue(loader.loadedFromCache);\n- assertTrue(loader.loadedFromCache);\n assertEquals(0, cache.count());\n assertEquals(0, requestCacheStats.stats().getMemorySize().bytesAsInt());\n \n@@ -215,7 +213,7 @@ public void testEviction() throws Exception {\n new ShardId(\"foo\", \"bar\", 1));\n TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard);\n Loader secondLoader = new Loader(secondReader, 0);\n- \n+\n BytesReference value1 = cache.getOrCompute(entity, loader, reader, termQuery.buildAsBytes());\n assertEquals(\"foo\", value1.streamInput().readString());\n BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termQuery.buildAsBytes());\n@@ -347,6 +345,74 @@ public BytesReference get() {\n \n }\n \n+ public void testInvalidate() throws Exception {\n+ ShardRequestCache requestCacheStats = new ShardRequestCache();\n+ IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY);\n+ Directory dir = newDirectory();\n+ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig());\n+\n+ writer.addDocument(newDoc(0, \"foo\"));\n+ DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer),\n+ new ShardId(\"foo\", \"bar\", 1));\n+ TermQueryBuilder termQuery = new TermQueryBuilder(\"id\", \"0\");\n+ AtomicBoolean indexShard = new AtomicBoolean(true);\n+\n+ // initial cache\n+ TestEntity entity = new TestEntity(requestCacheStats, indexShard);\n+ Loader loader = new Loader(reader, 0);\n+ BytesReference value = cache.getOrCompute(entity, loader, reader, termQuery.buildAsBytes());\n+ assertEquals(\"foo\", value.streamInput().readString());\n+ assertEquals(0, requestCacheStats.stats().getHitCount());\n+ assertEquals(1, requestCacheStats.stats().getMissCount());\n+ assertEquals(0, requestCacheStats.stats().getEvictions());\n+ assertFalse(loader.loadedFromCache);\n+ assertEquals(1, cache.count());\n+\n+ // cache hit\n+ entity = new TestEntity(requestCacheStats, indexShard);\n+ loader = new Loader(reader, 0);\n+ value = cache.getOrCompute(entity, loader, reader, termQuery.buildAsBytes());\n+ assertEquals(\"foo\", value.streamInput().readString());\n+ assertEquals(1, requestCacheStats.stats().getHitCount());\n+ assertEquals(1, requestCacheStats.stats().getMissCount());\n+ assertEquals(0, requestCacheStats.stats().getEvictions());\n+ assertTrue(loader.loadedFromCache);\n+ assertEquals(1, cache.count());\n+ assertTrue(requestCacheStats.stats().getMemorySize().bytesAsInt() > value.length());\n+ assertEquals(1, cache.numRegisteredCloseListeners());\n+\n+ // load again 
after invalidate\n+ entity = new TestEntity(requestCacheStats, indexShard);\n+ loader = new Loader(reader, 0);\n+ cache.invalidate(entity, reader, termQuery.buildAsBytes());\n+ value = cache.getOrCompute(entity, loader, reader, termQuery.buildAsBytes());\n+ assertEquals(\"foo\", value.streamInput().readString());\n+ assertEquals(1, requestCacheStats.stats().getHitCount());\n+ assertEquals(2, requestCacheStats.stats().getMissCount());\n+ assertEquals(0, requestCacheStats.stats().getEvictions());\n+ assertFalse(loader.loadedFromCache);\n+ assertEquals(1, cache.count());\n+ assertTrue(requestCacheStats.stats().getMemorySize().bytesAsInt() > value.length());\n+ assertEquals(1, cache.numRegisteredCloseListeners());\n+\n+ // release\n+ if (randomBoolean()) {\n+ reader.close();\n+ } else {\n+ indexShard.set(false); // closed shard but reader is still open\n+ cache.clear(entity);\n+ }\n+ cache.cleanCache();\n+ assertEquals(1, requestCacheStats.stats().getHitCount());\n+ assertEquals(2, requestCacheStats.stats().getMissCount());\n+ assertEquals(0, requestCacheStats.stats().getEvictions());\n+ assertEquals(0, cache.count());\n+ assertEquals(0, requestCacheStats.stats().getMemorySize().bytesAsInt());\n+\n+ IOUtils.close(reader, writer, dir, cache);\n+ assertEquals(0, cache.numRegisteredCloseListeners());\n+ }\n+\n private class TestEntity extends AbstractIndexShardCacheEntity {\n private final AtomicBoolean standInForIndexShard;\n private final ShardRequestCache shardRequestCache;", "filename": "core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java", "status": "modified" } ] }
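To make the control flow in `loadIntoContext` easier to follow, here is a heavily simplified, hypothetical model (none of these class names are Elasticsearch APIs): every waiter on a cache key gets a result rather than an exception, and only the thread that actually computed a timed-out result evicts it afterwards, mirroring the `loadedFromCache` flag in the diff.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;

// Simplified, hypothetical model of "cache first, invalidate on timeout".
public final class CacheOnTimeoutSketch {

    static final class Result {
        final String payload;
        final boolean timedOut;
        Result(String payload, boolean timedOut) { this.payload = payload; this.timedOut = timedOut; }
    }

    static final class RequestCache {
        private final Map<String, Result> cache = new ConcurrentHashMap<>();

        Result getOrCompute(String key, Supplier<Result> loader) {
            final boolean[] computedHere = { false }; // analogous to the loadedFromCache flag
            Result result = cache.computeIfAbsent(key, k -> {
                computedHere[0] = true;
                return loader.get();
            });
            // Only the thread that produced a timed-out result invalidates it; concurrent waiters
            // may still briefly see the stale entry, which the PR accepts as a trade-off.
            if (computedHere[0] && result.timedOut) {
                cache.remove(key);
            }
            return result;
        }
    }

    public static void main(String[] args) {
        RequestCache cache = new RequestCache();
        Result first = cache.getOrCompute("q", () -> new Result("partial", true));
        System.out.println(first.payload + " timedOut=" + first.timedOut);   // partial timedOut=true, then evicted
        Result second = cache.getOrCompute("q", () -> new Result("full", false));
        System.out.println(second.payload + " timedOut=" + second.timedOut); // full timedOut=false (recomputed)
    }
}
```

Callers that raced in before the eviction simply receive the short-lived stale entry, which is the trade-off the PR body describes.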
{ "body": "Reported at: https://discuss.elastic.co/t/combine-elasticsearch-5-1-1-and-repository-hdfs/69659\r\n\r\nIf you define as described [in our docs](https://www.elastic.co/guide/en/elasticsearch/plugins/current/repository-hdfs-config.html) the following `elasticsearch.yml` settings:\r\n\r\n```yml\r\nrepositories:\r\n hdfs:\r\n uri: \"hdfs://es-master:9000/\" # optional - Hadoop file-system URI\r\n path: \"some/path\" # required - path with the file-system where data is stored/loaded\r\n```\r\n\r\nIt fails at startup because we don't register the global setting `repositories.hdfs.path` in `HdfsPlugin`.\r\n\r\nWe should either support all global `repositories.hdfs` settings we documented or remove that from our docs so people must provide those settings only when registering the repository with:\r\n\r\n```\r\nPUT _snapshot/my_hdfs_repository\r\n{\r\n \"type\": \"hdfs\",\r\n \"settings\": {\r\n \"uri\": \"hdfs://namenode:8020/\",\r\n \"path\": \"elasticsearch/respositories/my_hdfs_repository\",\r\n \"conf.dfs.client.read.shortcircuit\": \"true\"\r\n }\r\n}\r\n```\r\n", "comments": [ { "body": "@jbaiera I can fix it but I'd like to know what is your point of view on the above ^^^", "created_at": "2016-12-21T11:22:52Z" }, { "body": "@jbaiera ping :) Would love to have your opinion.", "created_at": "2017-01-16T09:29:19Z" }, { "body": "@dadoonet I think that registering the configurations would be the more sensible course of action. I'm happy to open a PR for it soon.", "created_at": "2017-01-26T03:46:30Z" }, { "body": "@jbaiera actually I think that based on discussion we had with @rjernst about s3 plugin I think we should better get rid of `repositories.impl` settings totally (see #22800). It makes the code a way too much complex for a very little gain.\r\n\r\nI'm going to open a PR about removing that from the documentation instead and we can discuss about it there (or here).", "created_at": "2017-01-26T08:17:29Z" }, { "body": "@dadoonet I'm fine with this course of action. I think it makes sense if the general consensus is to move away from the pattern. Thanks much!", "created_at": "2017-01-30T21:28:00Z" } ], "number": 22301, "title": "repositories.hdfs.path can not be set" }
{ "body": "Reported at: https://discuss.elastic.co/t/combine-elasticsearch-5-1-1-and-repository-hdfs/69659\r\n\r\nIf you define as described [in our docs](https://www.elastic.co/guide/en/elasticsearch/plugins/current/repository-hdfs-config.html) the following `elasticsearch.yml` settings:\r\n\r\n```yml\r\nrepositories:\r\n hdfs:\r\n uri: \"hdfs://es-master:9000/\" # optional - Hadoop file-system URI\r\n path: \"some/path\" # required - path with the file-system where data is stored/loaded\r\n```\r\n\r\nIt fails at startup because we don't register the global setting `repositories.hdfs.path` in `HdfsPlugin`.\r\n\r\nThis PR removes that from our docs so people must provide those settings only when registering the repository with:\r\n\r\n```\r\nPUT _snapshot/my_hdfs_repository\r\n{\r\n \"type\": \"hdfs\",\r\n \"settings\": {\r\n \"uri\": \"hdfs://namenode:8020/\",\r\n \"path\": \"elasticsearch/respositories/my_hdfs_repository\",\r\n \"conf.dfs.client.read.shortcircuit\": \"true\"\r\n }\r\n}\r\n```\r\n\r\nBased on issue #22800.\r\n\r\nCloses #22301", "number": 22801, "review_comments": [], "title": "repositories.hdfs.path can not be set" }
{ "commits": [ { "message": "repositories.hdfs.path can not be set\n\nReported at: https://discuss.elastic.co/t/combine-elasticsearch-5-1-1-and-repository-hdfs/69659\n\nIf you define as described [in our docs](https://www.elastic.co/guide/en/elasticsearch/plugins/current/repository-hdfs-config.html) the following `elasticsearch.yml` settings:\n\n```yml\nrepositories:\n hdfs:\n uri: \"hdfs://es-master:9000/\" # optional - Hadoop file-system URI\n path: \"some/path\" # required - path with the file-system where data is stored/loaded\n```\n\nIt fails at startup because we don't register the global setting `repositories.hdfs.path` in `HdfsPlugin`.\n\nThis PR removes that from our docs so people must provide those settings only when registering the repository with:\n\n```\nPUT _snapshot/my_hdfs_repository\n{\n \"type\": \"hdfs\",\n \"settings\": {\n \"uri\": \"hdfs://namenode:8020/\",\n \"path\": \"elasticsearch/respositories/my_hdfs_repository\",\n \"conf.dfs.client.read.shortcircuit\": \"true\"\n }\n}\n```\n\nBased on issue #22800.\n\nCloses #22301" } ], "files": [ { "diff": "@@ -95,16 +95,3 @@ The following settings are supported:\n \n Override the chunk size. (Disabled by default)\n \n-\n-Alternatively, you can define the `hdfs` repository and its settings in your `elasticsearch.yml`:\n-[source,yaml]\n-----\n-repositories:\n- hdfs:\n- uri: \"hdfs://<host>:<port>/\" \\# required - HDFS address only\n- path: \"some/path\" \\# required - path within the file-system where data is stored/loaded\n- load_defaults: \"true\" \\# optional - whether to load the default Hadoop configuration (default) or not\n- conf.<key> : \"<value>\" \\# optional - 'inlined' key=value added to the Hadoop configuration\n- compress: \"false\" \\# optional - whether to compress the metadata or not (default)\n- chunk_size: \"10mb\" \\# optional - chunk size (disabled by default)\n-----", "filename": "docs/plugins/repository-hdfs.asciidoc", "status": "modified" } ] }
{ "body": "In some cases (apparently with outlook files), mime4j library is needed.\r\nWe removed it in the past which can cause elasticsearch to crash when you are using ingest-attachment (and probably mapper-attachments as well in 2.x series) with a file which requires this library.\r\n\r\nSimilar problem as the one reported at #22077.", "comments": [ { "body": "@jasontedor As per your advice, I added:\r\n\r\n* http://svn.apache.org/repos/asf/tika/trunk/tika-parsers/src/test/resources/test-documents/test-outlook.msg\r\n* http://svn.apache.org/repos/asf/tika/trunk/tika-parsers/src/test/resources/test-documents/test-outlook2003.msg\r\n\r\nI can confirm it fails without the dependencies but succeed with them.\r\n\r\nThanks for the suggestion!", "created_at": "2017-01-25T07:54:50Z" } ], "number": 22764, "title": "Add missing mime4j library" }
{ "body": "In some cases (apparently with outlook files), mime4j library is needed.\r\nWe removed it in the past which can cause elasticsearch to crash when you are using ingest-attachment (and probably mapper-attachments as well in 2.x series) with a file which requires this library.\r\n\r\nBackport of #22764 in 5.x branch\r\nApplying same changes for mapper-attachments plugin (5.x branch)", "number": 22799, "review_comments": [], "title": "Add missing mime4j library" }
{ "commits": [ { "message": "Add missing mime4j library\n\nIn some cases (apparently with outlook files), mime4j library is needed.\nWe removed it in the past which can cause elasticsearch to crash when you are using ingest-attachment (and probably mapper-attachments as well in 2.x series) with a file which requires this library.\n\nBackport of #22764 in 5.x branch" }, { "message": "Add missing mime4j library\n\nApplying same changes as #22764 but for mapper-attachments plugin (5.x branch)" } ], "files": [ { "diff": "@@ -26,7 +26,8 @@ versions << [\n 'tika': '1.14',\n 'pdfbox': '2.0.3',\n 'bouncycastle': '1.55',\n- 'poi': '3.15'\n+ 'poi': '3.15',\n+ 'mime4j': '0.7.2'\n ]\n \n dependencies {\n@@ -59,11 +60,19 @@ dependencies {\n compile \"org.apache.poi:poi-scratchpad:${versions.poi}\"\n // Apple iWork\n compile 'org.apache.commons:commons-compress:1.10'\n+ // Outlook documents\n+ compile \"org.apache.james:apache-mime4j-core:${versions.mime4j}\"\n+ compile \"org.apache.james:apache-mime4j-dom:${versions.mime4j}\"\n }\n \n // TODO: stop using LanguageIdentifier...\n compileJava.options.compilerArgs << \"-Xlint:-deprecation\"\n \n+\n+dependencyLicenses {\n+ mapping from: /apache-mime4j-.*/, to: 'apache-mime4j'\n+}\n+\n forbiddenPatterns {\n exclude '**/*.docx'\n exclude '**/*.pdf'\n@@ -530,25 +539,6 @@ thirdPartyAudit.excludes = [\n 'org.apache.http.client.utils.URIBuilder',\n 'org.apache.http.entity.ByteArrayEntity',\n 'org.apache.http.impl.client.DefaultHttpClient',\n- 'org.apache.james.mime4j.MimeException',\n- 'org.apache.james.mime4j.codec.DecodeMonitor',\n- 'org.apache.james.mime4j.codec.DecoderUtil',\n- 'org.apache.james.mime4j.dom.FieldParser',\n- 'org.apache.james.mime4j.dom.address.Address',\n- 'org.apache.james.mime4j.dom.address.AddressList',\n- 'org.apache.james.mime4j.dom.address.Mailbox',\n- 'org.apache.james.mime4j.dom.address.MailboxList',\n- 'org.apache.james.mime4j.dom.field.AddressListField',\n- 'org.apache.james.mime4j.dom.field.DateTimeField',\n- 'org.apache.james.mime4j.dom.field.MailboxListField',\n- 'org.apache.james.mime4j.dom.field.ParsedField',\n- 'org.apache.james.mime4j.dom.field.UnstructuredField',\n- 'org.apache.james.mime4j.field.LenientFieldParser',\n- 'org.apache.james.mime4j.parser.ContentHandler',\n- 'org.apache.james.mime4j.parser.MimeStreamParser',\n- 'org.apache.james.mime4j.stream.BodyDescriptor',\n- 'org.apache.james.mime4j.stream.Field',\n- 'org.apache.james.mime4j.stream.MimeConfig',\n 'org.apache.jcp.xml.dsig.internal.dom.DOMDigestMethod',\n 'org.apache.jcp.xml.dsig.internal.dom.DOMKeyInfo',\n 'org.apache.jcp.xml.dsig.internal.dom.DOMReference',", "filename": "plugins/ingest-attachment/build.gradle", "status": "modified" }, { "diff": "@@ -0,0 +1,361 @@\n+ Apache License\n+ Version 2.0, January 2004\n+ http://www.apache.org/licenses/\n+\n+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n+\n+ 1. Definitions.\n+\n+ \"License\" shall mean the terms and conditions for use, reproduction,\n+ and distribution as defined by Sections 1 through 9 of this document.\n+\n+ \"Licensor\" shall mean the copyright owner or entity authorized by\n+ the copyright owner that is granting the License.\n+\n+ \"Legal Entity\" shall mean the union of the acting entity and all\n+ other entities that control, are controlled by, or are under common\n+ control with that entity. 
For the purposes of this definition,\n+ \"control\" means (i) the power, direct or indirect, to cause the\n+ direction or management of such entity, whether by contract or\n+ otherwise, or (ii) ownership of fifty percent (50%) or more of the\n+ outstanding shares, or (iii) beneficial ownership of such entity.\n+\n+ \"You\" (or \"Your\") shall mean an individual or Legal Entity\n+ exercising permissions granted by this License.\n+\n+ \"Source\" form shall mean the preferred form for making modifications,\n+ including but not limited to software source code, documentation\n+ source, and configuration files.\n+\n+ \"Object\" form shall mean any form resulting from mechanical\n+ transformation or translation of a Source form, including but\n+ not limited to compiled object code, generated documentation,\n+ and conversions to other media types.\n+\n+ \"Work\" shall mean the work of authorship, whether in Source or\n+ Object form, made available under the License, as indicated by a\n+ copyright notice that is included in or attached to the work\n+ (an example is provided in the Appendix below).\n+\n+ \"Derivative Works\" shall mean any work, whether in Source or Object\n+ form, that is based on (or derived from) the Work and for which the\n+ editorial revisions, annotations, elaborations, or other modifications\n+ represent, as a whole, an original work of authorship. For the purposes\n+ of this License, Derivative Works shall not include works that remain\n+ separable from, or merely link (or bind by name) to the interfaces of,\n+ the Work and Derivative Works thereof.\n+\n+ \"Contribution\" shall mean any work of authorship, including\n+ the original version of the Work and any modifications or additions\n+ to that Work or Derivative Works thereof, that is intentionally\n+ submitted to Licensor for inclusion in the Work by the copyright owner\n+ or by an individual or Legal Entity authorized to submit on behalf of\n+ the copyright owner. For the purposes of this definition, \"submitted\"\n+ means any form of electronic, verbal, or written communication sent\n+ to the Licensor or its representatives, including but not limited to\n+ communication on electronic mailing lists, source code control systems,\n+ and issue tracking systems that are managed by, or on behalf of, the\n+ Licensor for the purpose of discussing and improving the Work, but\n+ excluding communication that is conspicuously marked or otherwise\n+ designated in writing by the copyright owner as \"Not a Contribution.\"\n+\n+ \"Contributor\" shall mean Licensor and any individual or Legal Entity\n+ on behalf of whom a Contribution has been received by Licensor and\n+ subsequently incorporated within the Work.\n+\n+ 2. Grant of Copyright License. Subject to the terms and conditions of\n+ this License, each Contributor hereby grants to You a perpetual,\n+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n+ copyright license to reproduce, prepare Derivative Works of,\n+ publicly display, publicly perform, sublicense, and distribute the\n+ Work and such Derivative Works in Source or Object form.\n+\n+ 3. Grant of Patent License. 
Subject to the terms and conditions of\n+ this License, each Contributor hereby grants to You a perpetual,\n+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n+ (except as stated in this section) patent license to make, have made,\n+ use, offer to sell, sell, import, and otherwise transfer the Work,\n+ where such license applies only to those patent claims licensable\n+ by such Contributor that are necessarily infringed by their\n+ Contribution(s) alone or by combination of their Contribution(s)\n+ with the Work to which such Contribution(s) was submitted. If You\n+ institute patent litigation against any entity (including a\n+ cross-claim or counterclaim in a lawsuit) alleging that the Work\n+ or a Contribution incorporated within the Work constitutes direct\n+ or contributory patent infringement, then any patent licenses\n+ granted to You under this License for that Work shall terminate\n+ as of the date such litigation is filed.\n+\n+ 4. Redistribution. You may reproduce and distribute copies of the\n+ Work or Derivative Works thereof in any medium, with or without\n+ modifications, and in Source or Object form, provided that You\n+ meet the following conditions:\n+\n+ (a) You must give any other recipients of the Work or\n+ Derivative Works a copy of this License; and\n+\n+ (b) You must cause any modified files to carry prominent notices\n+ stating that You changed the files; and\n+\n+ (c) You must retain, in the Source form of any Derivative Works\n+ that You distribute, all copyright, patent, trademark, and\n+ attribution notices from the Source form of the Work,\n+ excluding those notices that do not pertain to any part of\n+ the Derivative Works; and\n+\n+ (d) If the Work includes a \"NOTICE\" text file as part of its\n+ distribution, then any Derivative Works that You distribute must\n+ include a readable copy of the attribution notices contained\n+ within such NOTICE file, excluding those notices that do not\n+ pertain to any part of the Derivative Works, in at least one\n+ of the following places: within a NOTICE text file distributed\n+ as part of the Derivative Works; within the Source form or\n+ documentation, if provided along with the Derivative Works; or,\n+ within a display generated by the Derivative Works, if and\n+ wherever such third-party notices normally appear. The contents\n+ of the NOTICE file are for informational purposes only and\n+ do not modify the License. You may add Your own attribution\n+ notices within Derivative Works that You distribute, alongside\n+ or as an addendum to the NOTICE text from the Work, provided\n+ that such additional attribution notices cannot be construed\n+ as modifying the License.\n+\n+ You may add Your own copyright statement to Your modifications and\n+ may provide additional or different license terms and conditions\n+ for use, reproduction, or distribution of Your modifications, or\n+ for any such Derivative Works as a whole, provided Your use,\n+ reproduction, and distribution of the Work otherwise complies with\n+ the conditions stated in this License.\n+\n+ 5. Submission of Contributions. 
Unless You explicitly state otherwise,\n+ any Contribution intentionally submitted for inclusion in the Work\n+ by You to the Licensor shall be under the terms and conditions of\n+ this License, without any additional terms or conditions.\n+ Notwithstanding the above, nothing herein shall supersede or modify\n+ the terms of any separate license agreement you may have executed\n+ with Licensor regarding such Contributions.\n+\n+ 6. Trademarks. This License does not grant permission to use the trade\n+ names, trademarks, service marks, or product names of the Licensor,\n+ except as required for reasonable and customary use in describing the\n+ origin of the Work and reproducing the content of the NOTICE file.\n+\n+ 7. Disclaimer of Warranty. Unless required by applicable law or\n+ agreed to in writing, Licensor provides the Work (and each\n+ Contributor provides its Contributions) on an \"AS IS\" BASIS,\n+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n+ implied, including, without limitation, any warranties or conditions\n+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n+ PARTICULAR PURPOSE. You are solely responsible for determining the\n+ appropriateness of using or redistributing the Work and assume any\n+ risks associated with Your exercise of permissions under this License.\n+\n+ 8. Limitation of Liability. In no event and under no legal theory,\n+ whether in tort (including negligence), contract, or otherwise,\n+ unless required by applicable law (such as deliberate and grossly\n+ negligent acts) or agreed to in writing, shall any Contributor be\n+ liable to You for damages, including any direct, indirect, special,\n+ incidental, or consequential damages of any character arising as a\n+ result of this License or out of the use or inability to use the\n+ Work (including but not limited to damages for loss of goodwill,\n+ work stoppage, computer failure or malfunction, or any and all\n+ other commercial damages or losses), even if such Contributor\n+ has been advised of the possibility of such damages.\n+\n+ 9. Accepting Warranty or Additional Liability. While redistributing\n+ the Work or Derivative Works thereof, You may choose to offer,\n+ and charge a fee for, acceptance of support, warranty, indemnity,\n+ or other liability obligations and/or rights consistent with this\n+ License. However, in accepting such obligations, You may act only\n+ on Your own behalf and on Your sole responsibility, not on behalf\n+ of any other Contributor, and only if You agree to indemnify,\n+ defend, and hold each Contributor harmless for any liability\n+ incurred by, or claims asserted against, such Contributor by reason\n+ of your accepting any such warranty or additional liability.\n+\n+ END OF TERMS AND CONDITIONS\n+\n+\n+\n+\n+ THIS PRODUCT ALSO INCLUDES THIRD PARTY SOFTWARE REDISTRIBUTED UNDER THE\n+ FOLLOWING LICENSES:\n+\n+\tApache Commons Logging,\n+\t The Apache Software License, Version 1.1 (commons-logging-1.1.1.jar)\n+\n+\t\t The Apache Software License, Version 1.1\n+\n+\t\t Redistribution and use in source and binary forms, with or without\n+\t\t modification, are permitted provided that the following conditions\n+\t\t are met:\n+\n+\t\t 1. Redistributions of source code must retain the above copyright\n+\t\t notice, this list of conditions and the following disclaimer.\n+\n+\t\t 2. 
Redistributions in binary form must reproduce the above copyright\n+\t\t notice, this list of conditions and the following disclaimer in\n+\t\t the documentation and/or other materials provided with the\n+\t\t distribution.\n+\n+\t\t 3. The end-user documentation included with the redistribution,\n+\t\t if any, must include the following acknowledgment:\n+\t\t \"This product includes software developed by the\n+\t\t Apache Software Foundation (http://www.apache.org/).\"\n+\t\t Alternately, this acknowledgment may appear in the software itself,\n+\t\t if and wherever such third-party acknowledgments normally appear.\n+\n+\t\t 4. The names \"Apache\" and \"Apache Software Foundation\" must\n+\t\t not be used to endorse or promote products derived from this\n+\t\t software without prior written permission. For written\n+\t\t permission, please contact apache@apache.org.\n+\n+\t\t 5. Products derived from this software may not be called \"Apache\",\n+\t\t nor may \"Apache\" appear in their name, without prior written\n+\t\t permission of the Apache Software Foundation.\n+\n+\t\t THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED\n+\t\t WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n+\t\t OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n+\t\t DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR\n+\t\t ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+\t\t SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+\t\t LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF\n+\t\t USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n+\t\t ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n+\t\t OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\n+\t\t OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n+\t\t SUCH DAMAGE.\n+\n+\n+\tTest messages from the Perl-MIME-Tools project,\n+\n+\t\t\t\t\t The \"Artistic License\"\n+\n+\t\t\t\t\t\tPreamble\n+\n+\t\tThe intent of this document is to state the conditions under which a\n+\t\tPackage may be copied, such that the Copyright Holder maintains some\n+\t\tsemblance of artistic control over the development of the package,\n+\t\twhile giving the users of the package the right to use and distribute\n+\t\tthe Package in a more-or-less customary fashion, plus the right to make\n+\t\treasonable modifications.\n+\n+\t\tDefinitions:\n+\n+\t\t\t\"Package\" refers to the collection of files distributed by the\n+\t\t\tCopyright Holder, and derivatives of that collection of files\n+\t\t\tcreated through textual modification.\n+\n+\t\t\t\"Standard Version\" refers to such a Package if it has not been\n+\t\t\tmodified, or has been modified in accordance with the wishes\n+\t\t\tof the Copyright Holder as specified below.\n+\n+\t\t\t\"Copyright Holder\" is whoever is named in the copyright or\n+\t\t\tcopyrights for the package.\n+\n+\t\t\t\"You\" is you, if you're thinking about copying or distributing\n+\t\t\tthis Package.\n+\n+\t\t\t\"Reasonable copying fee\" is whatever you can justify on the\n+\t\t\tbasis of media cost, duplication charges, time of people involved,\n+\t\t\tand so on. 
(You will not be required to justify it to the\n+\t\t\tCopyright Holder, but only to the computing community at large\n+\t\t\tas a market that must bear the fee.)\n+\n+\t\t\t\"Freely Available\" means that no fee is charged for the item\n+\t\t\titself, though there may be fees involved in handling the item.\n+\t\t\tIt also means that recipients of the item may redistribute it\n+\t\t\tunder the same conditions they received it.\n+\n+\t\t1. You may make and give away verbatim copies of the source form of the\n+\t\tStandard Version of this Package without restriction, provided that you\n+\t\tduplicate all of the original copyright notices and associated disclaimers.\n+\n+\t\t2. You may apply bug fixes, portability fixes and other modifications\n+\t\tderived from the Public Domain or from the Copyright Holder. A Package\n+\t\tmodified in such a way shall still be considered the Standard Version.\n+\n+\t\t3. You may otherwise modify your copy of this Package in any way, provided\n+\t\tthat you insert a prominent notice in each changed file stating how and\n+\t\twhen you changed that file, and provided that you do at least ONE of the\n+\t\tfollowing:\n+\n+\t\t a) place your modifications in the Public Domain or otherwise make them\n+\t\t Freely Available, such as by posting said modifications to Usenet or\n+\t\t an equivalent medium, or placing the modifications on a major archive\n+\t\t site such as uunet.uu.net, or by allowing the Copyright Holder to include\n+\t\t your modifications in the Standard Version of the Package.\n+\n+\t\t b) use the modified Package only within your corporation or organization.\n+\n+\t\t c) rename any non-standard executables so the names do not conflict\n+\t\t with standard executables, which must also be provided, and provide\n+\t\t a separate manual page for each non-standard executable that clearly\n+\t\t documents how it differs from the Standard Version.\n+\n+\t\t d) make other distribution arrangements with the Copyright Holder.\n+\n+\t\t4. You may distribute the programs of this Package in object code or\n+\t\texecutable form, provided that you do at least ONE of the following:\n+\n+\t\t a) distribute a Standard Version of the executables and library files,\n+\t\t together with instructions (in the manual page or equivalent) on where\n+\t\t to get the Standard Version.\n+\n+\t\t b) accompany the distribution with the machine-readable source of\n+\t\t the Package with your modifications.\n+\n+\t\t c) give non-standard executables non-standard names, and clearly\n+\t\t document the differences in manual pages (or equivalent), together\n+\t\t with instructions on where to get the Standard Version.\n+\n+\t\t d) make other distribution arrangements with the Copyright Holder.\n+\n+\t\t5. You may charge a reasonable copying fee for any distribution of this\n+\t\tPackage. You may charge any fee you choose for support of this\n+\t\tPackage. You may not charge a fee for this Package itself. However,\n+\t\tyou may distribute this Package in aggregate with other (possibly\n+\t\tcommercial) programs as part of a larger (possibly commercial) software\n+\t\tdistribution provided that you do not advertise this Package as a\n+\t\tproduct of your own. You may embed this Package's interpreter within\n+\t\tan executable of yours (by linking); this shall be construed as a mere\n+\t\tform of aggregation, provided that the complete Standard Version of the\n+\t\tinterpreter is so embedded.\n+\n+\t\t6. 
The scripts and library files supplied as input to or produced as\n+\t\toutput from the programs of this Package do not automatically fall\n+\t\tunder the copyright of this Package, but belong to whoever generated\n+\t\tthem, and may be sold commercially, and may be aggregated with this\n+\t\tPackage. If such scripts or library files are aggregated with this\n+\t\tPackage via the so-called \"undump\" or \"unexec\" methods of producing a\n+\t\tbinary executable image, then distribution of such an image shall\n+\t\tneither be construed as a distribution of this Package nor shall it\n+\t\tfall under the restrictions of Paragraphs 3 and 4, provided that you do\n+\t\tnot represent such an executable image as a Standard Version of this\n+\t\tPackage.\n+\n+\t\t7. C subroutines (or comparably compiled subroutines in other\n+\t\tlanguages) supplied by you and linked into this Package in order to\n+\t\temulate subroutines and variables of the language defined by this\n+\t\tPackage shall not be considered part of this Package, but are the\n+\t\tequivalent of input as in Paragraph 6, provided these subroutines do\n+\t\tnot change the language in any way that would cause it to fail the\n+\t\tregression tests for the language.\n+\n+\t\t8. Aggregation of this Package with a commercial distribution is always\n+\t\tpermitted provided that the use of this Package is embedded; that is,\n+\t\twhen no overt attempt is made to make this Package's interfaces visible\n+\t\tto the end user of the commercial distribution. Such use shall not be\n+\t\tconstrued as a distribution of this Package.\n+\n+\t\t9. The name of the Copyright Holder may not be used to endorse or promote\n+\t\tproducts derived from this software without specific prior written permission.\n+\n+\t\t10. THIS PACKAGE IS PROVIDED \"AS IS\" AND WITHOUT ANY EXPRESS OR\n+\t\tIMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED\n+\t\tWARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.\n+\n+\t\t\t\t\t\tThe End\n+\n+", "filename": "plugins/ingest-attachment/licenses/apache-mime4j-LICENSE.txt", "status": "added" }, { "diff": "@@ -0,0 +1,13 @@\n+ =========================================================================\n+ == NOTICE file for use with the Apache License, Version 2.0, ==\n+ =========================================================================\n+ \n+ Apache JAMES Mime4j\n+ Copyright 2004-2010 The Apache Software Foundation\n+ \n+ This product includes software developed at\n+ The Apache Software Foundation (http://www.apache.org/).\n+\n+ This product test suite includes data (mimetools-testmsgs folder) developed \n+ by Eryq and ZeeGee Software Inc as part of the \"MIME-tools\" Perl5 toolkit\n+ and licensed under the Artistic License", "filename": "plugins/ingest-attachment/licenses/apache-mime4j-NOTICE.txt", "status": "added" }, { "diff": "@@ -0,0 +1 @@\n+a81264fe0265ebe8fd1d8128aad06dc320de6eef\n\\ No newline at end of file", "filename": "plugins/ingest-attachment/licenses/apache-mime4j-core-0.7.2.jar.sha1", "status": "added" }, { "diff": "@@ -0,0 +1 @@\n+1c289aa264548a0a1f1b43685a9cb2ab23f67287\n\\ No newline at end of file", "filename": "plugins/ingest-attachment/licenses/apache-mime4j-dom-0.7.2.jar.sha1", "status": "added" }, { "diff": "", "filename": "plugins/ingest-attachment/src/test/resources/org/elasticsearch/ingest/attachment/test/tika-files.zip", "status": "modified" }, { "diff": "@@ -26,7 +26,8 @@ versions << [\n 'tika': '1.14',\n 'pdfbox': '2.0.3',\n 'bouncycastle': '1.55',\n- 'poi': '3.15'\n+ 'poi': 
'3.15',\n+ 'mime4j': '0.7.2'\n ]\n \n dependencies {\n@@ -59,11 +60,19 @@ dependencies {\n compile \"org.apache.poi:poi-scratchpad:${versions.poi}\"\n // Apple iWork\n compile 'org.apache.commons:commons-compress:1.10'\n+ // Outlook documents\n+ compile \"org.apache.james:apache-mime4j-core:${versions.mime4j}\"\n+ compile \"org.apache.james:apache-mime4j-dom:${versions.mime4j}\"\n }\n \n // TODO: stop using LanguageIdentifier...\n compileJava.options.compilerArgs << \"-Xlint:-deprecation\"\n \n+\n+dependencyLicenses {\n+ mapping from: /apache-mime4j-.*/, to: 'apache-mime4j'\n+}\n+\n forbiddenPatterns {\n exclude '**/*.docx'\n exclude '**/*.pdf'\n@@ -530,25 +539,6 @@ thirdPartyAudit.excludes = [\n 'org.apache.http.client.utils.URIBuilder',\n 'org.apache.http.entity.ByteArrayEntity',\n 'org.apache.http.impl.client.DefaultHttpClient',\n- 'org.apache.james.mime4j.MimeException',\n- 'org.apache.james.mime4j.codec.DecodeMonitor',\n- 'org.apache.james.mime4j.codec.DecoderUtil',\n- 'org.apache.james.mime4j.dom.FieldParser',\n- 'org.apache.james.mime4j.dom.address.Address',\n- 'org.apache.james.mime4j.dom.address.AddressList',\n- 'org.apache.james.mime4j.dom.address.Mailbox',\n- 'org.apache.james.mime4j.dom.address.MailboxList',\n- 'org.apache.james.mime4j.dom.field.AddressListField',\n- 'org.apache.james.mime4j.dom.field.DateTimeField',\n- 'org.apache.james.mime4j.dom.field.MailboxListField',\n- 'org.apache.james.mime4j.dom.field.ParsedField',\n- 'org.apache.james.mime4j.dom.field.UnstructuredField',\n- 'org.apache.james.mime4j.field.LenientFieldParser',\n- 'org.apache.james.mime4j.parser.ContentHandler',\n- 'org.apache.james.mime4j.parser.MimeStreamParser',\n- 'org.apache.james.mime4j.stream.BodyDescriptor',\n- 'org.apache.james.mime4j.stream.Field',\n- 'org.apache.james.mime4j.stream.MimeConfig',\n 'org.apache.jcp.xml.dsig.internal.dom.DOMDigestMethod',\n 'org.apache.jcp.xml.dsig.internal.dom.DOMKeyInfo',\n 'org.apache.jcp.xml.dsig.internal.dom.DOMReference',", "filename": "plugins/mapper-attachments/build.gradle", "status": "modified" }, { "diff": "@@ -0,0 +1,361 @@\n+ Apache License\n+ Version 2.0, January 2004\n+ http://www.apache.org/licenses/\n+\n+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n+\n+ 1. Definitions.\n+\n+ \"License\" shall mean the terms and conditions for use, reproduction,\n+ and distribution as defined by Sections 1 through 9 of this document.\n+\n+ \"Licensor\" shall mean the copyright owner or entity authorized by\n+ the copyright owner that is granting the License.\n+\n+ \"Legal Entity\" shall mean the union of the acting entity and all\n+ other entities that control, are controlled by, or are under common\n+ control with that entity. 
For the purposes of this definition,\n+ \"control\" means (i) the power, direct or indirect, to cause the\n+ direction or management of such entity, whether by contract or\n+ otherwise, or (ii) ownership of fifty percent (50%) or more of the\n+ outstanding shares, or (iii) beneficial ownership of such entity.\n+\n+ \"You\" (or \"Your\") shall mean an individual or Legal Entity\n+ exercising permissions granted by this License.\n+\n+ \"Source\" form shall mean the preferred form for making modifications,\n+ including but not limited to software source code, documentation\n+ source, and configuration files.\n+\n+ \"Object\" form shall mean any form resulting from mechanical\n+ transformation or translation of a Source form, including but\n+ not limited to compiled object code, generated documentation,\n+ and conversions to other media types.\n+\n+ \"Work\" shall mean the work of authorship, whether in Source or\n+ Object form, made available under the License, as indicated by a\n+ copyright notice that is included in or attached to the work\n+ (an example is provided in the Appendix below).\n+\n+ \"Derivative Works\" shall mean any work, whether in Source or Object\n+ form, that is based on (or derived from) the Work and for which the\n+ editorial revisions, annotations, elaborations, or other modifications\n+ represent, as a whole, an original work of authorship. For the purposes\n+ of this License, Derivative Works shall not include works that remain\n+ separable from, or merely link (or bind by name) to the interfaces of,\n+ the Work and Derivative Works thereof.\n+\n+ \"Contribution\" shall mean any work of authorship, including\n+ the original version of the Work and any modifications or additions\n+ to that Work or Derivative Works thereof, that is intentionally\n+ submitted to Licensor for inclusion in the Work by the copyright owner\n+ or by an individual or Legal Entity authorized to submit on behalf of\n+ the copyright owner. For the purposes of this definition, \"submitted\"\n+ means any form of electronic, verbal, or written communication sent\n+ to the Licensor or its representatives, including but not limited to\n+ communication on electronic mailing lists, source code control systems,\n+ and issue tracking systems that are managed by, or on behalf of, the\n+ Licensor for the purpose of discussing and improving the Work, but\n+ excluding communication that is conspicuously marked or otherwise\n+ designated in writing by the copyright owner as \"Not a Contribution.\"\n+\n+ \"Contributor\" shall mean Licensor and any individual or Legal Entity\n+ on behalf of whom a Contribution has been received by Licensor and\n+ subsequently incorporated within the Work.\n+\n+ 2. Grant of Copyright License. Subject to the terms and conditions of\n+ this License, each Contributor hereby grants to You a perpetual,\n+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n+ copyright license to reproduce, prepare Derivative Works of,\n+ publicly display, publicly perform, sublicense, and distribute the\n+ Work and such Derivative Works in Source or Object form.\n+\n+ 3. Grant of Patent License. 
Subject to the terms and conditions of\n+ this License, each Contributor hereby grants to You a perpetual,\n+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n+ (except as stated in this section) patent license to make, have made,\n+ use, offer to sell, sell, import, and otherwise transfer the Work,\n+ where such license applies only to those patent claims licensable\n+ by such Contributor that are necessarily infringed by their\n+ Contribution(s) alone or by combination of their Contribution(s)\n+ with the Work to which such Contribution(s) was submitted. If You\n+ institute patent litigation against any entity (including a\n+ cross-claim or counterclaim in a lawsuit) alleging that the Work\n+ or a Contribution incorporated within the Work constitutes direct\n+ or contributory patent infringement, then any patent licenses\n+ granted to You under this License for that Work shall terminate\n+ as of the date such litigation is filed.\n+\n+ 4. Redistribution. You may reproduce and distribute copies of the\n+ Work or Derivative Works thereof in any medium, with or without\n+ modifications, and in Source or Object form, provided that You\n+ meet the following conditions:\n+\n+ (a) You must give any other recipients of the Work or\n+ Derivative Works a copy of this License; and\n+\n+ (b) You must cause any modified files to carry prominent notices\n+ stating that You changed the files; and\n+\n+ (c) You must retain, in the Source form of any Derivative Works\n+ that You distribute, all copyright, patent, trademark, and\n+ attribution notices from the Source form of the Work,\n+ excluding those notices that do not pertain to any part of\n+ the Derivative Works; and\n+\n+ (d) If the Work includes a \"NOTICE\" text file as part of its\n+ distribution, then any Derivative Works that You distribute must\n+ include a readable copy of the attribution notices contained\n+ within such NOTICE file, excluding those notices that do not\n+ pertain to any part of the Derivative Works, in at least one\n+ of the following places: within a NOTICE text file distributed\n+ as part of the Derivative Works; within the Source form or\n+ documentation, if provided along with the Derivative Works; or,\n+ within a display generated by the Derivative Works, if and\n+ wherever such third-party notices normally appear. The contents\n+ of the NOTICE file are for informational purposes only and\n+ do not modify the License. You may add Your own attribution\n+ notices within Derivative Works that You distribute, alongside\n+ or as an addendum to the NOTICE text from the Work, provided\n+ that such additional attribution notices cannot be construed\n+ as modifying the License.\n+\n+ You may add Your own copyright statement to Your modifications and\n+ may provide additional or different license terms and conditions\n+ for use, reproduction, or distribution of Your modifications, or\n+ for any such Derivative Works as a whole, provided Your use,\n+ reproduction, and distribution of the Work otherwise complies with\n+ the conditions stated in this License.\n+\n+ 5. Submission of Contributions. 
Unless You explicitly state otherwise,\n+ any Contribution intentionally submitted for inclusion in the Work\n+ by You to the Licensor shall be under the terms and conditions of\n+ this License, without any additional terms or conditions.\n+ Notwithstanding the above, nothing herein shall supersede or modify\n+ the terms of any separate license agreement you may have executed\n+ with Licensor regarding such Contributions.\n+\n+ 6. Trademarks. This License does not grant permission to use the trade\n+ names, trademarks, service marks, or product names of the Licensor,\n+ except as required for reasonable and customary use in describing the\n+ origin of the Work and reproducing the content of the NOTICE file.\n+\n+ 7. Disclaimer of Warranty. Unless required by applicable law or\n+ agreed to in writing, Licensor provides the Work (and each\n+ Contributor provides its Contributions) on an \"AS IS\" BASIS,\n+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n+ implied, including, without limitation, any warranties or conditions\n+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n+ PARTICULAR PURPOSE. You are solely responsible for determining the\n+ appropriateness of using or redistributing the Work and assume any\n+ risks associated with Your exercise of permissions under this License.\n+\n+ 8. Limitation of Liability. In no event and under no legal theory,\n+ whether in tort (including negligence), contract, or otherwise,\n+ unless required by applicable law (such as deliberate and grossly\n+ negligent acts) or agreed to in writing, shall any Contributor be\n+ liable to You for damages, including any direct, indirect, special,\n+ incidental, or consequential damages of any character arising as a\n+ result of this License or out of the use or inability to use the\n+ Work (including but not limited to damages for loss of goodwill,\n+ work stoppage, computer failure or malfunction, or any and all\n+ other commercial damages or losses), even if such Contributor\n+ has been advised of the possibility of such damages.\n+\n+ 9. Accepting Warranty or Additional Liability. While redistributing\n+ the Work or Derivative Works thereof, You may choose to offer,\n+ and charge a fee for, acceptance of support, warranty, indemnity,\n+ or other liability obligations and/or rights consistent with this\n+ License. However, in accepting such obligations, You may act only\n+ on Your own behalf and on Your sole responsibility, not on behalf\n+ of any other Contributor, and only if You agree to indemnify,\n+ defend, and hold each Contributor harmless for any liability\n+ incurred by, or claims asserted against, such Contributor by reason\n+ of your accepting any such warranty or additional liability.\n+\n+ END OF TERMS AND CONDITIONS\n+\n+\n+\n+\n+ THIS PRODUCT ALSO INCLUDES THIRD PARTY SOFTWARE REDISTRIBUTED UNDER THE\n+ FOLLOWING LICENSES:\n+\n+\tApache Commons Logging,\n+\t The Apache Software License, Version 1.1 (commons-logging-1.1.1.jar)\n+\n+\t\t The Apache Software License, Version 1.1\n+\n+\t\t Redistribution and use in source and binary forms, with or without\n+\t\t modification, are permitted provided that the following conditions\n+\t\t are met:\n+\n+\t\t 1. Redistributions of source code must retain the above copyright\n+\t\t notice, this list of conditions and the following disclaimer.\n+\n+\t\t 2. 
Redistributions in binary form must reproduce the above copyright\n+\t\t notice, this list of conditions and the following disclaimer in\n+\t\t the documentation and/or other materials provided with the\n+\t\t distribution.\n+\n+\t\t 3. The end-user documentation included with the redistribution,\n+\t\t if any, must include the following acknowledgment:\n+\t\t \"This product includes software developed by the\n+\t\t Apache Software Foundation (http://www.apache.org/).\"\n+\t\t Alternately, this acknowledgment may appear in the software itself,\n+\t\t if and wherever such third-party acknowledgments normally appear.\n+\n+\t\t 4. The names \"Apache\" and \"Apache Software Foundation\" must\n+\t\t not be used to endorse or promote products derived from this\n+\t\t software without prior written permission. For written\n+\t\t permission, please contact apache@apache.org.\n+\n+\t\t 5. Products derived from this software may not be called \"Apache\",\n+\t\t nor may \"Apache\" appear in their name, without prior written\n+\t\t permission of the Apache Software Foundation.\n+\n+\t\t THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED\n+\t\t WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n+\t\t OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n+\t\t DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR\n+\t\t ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+\t\t SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+\t\t LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF\n+\t\t USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n+\t\t ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n+\t\t OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\n+\t\t OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n+\t\t SUCH DAMAGE.\n+\n+\n+\tTest messages from the Perl-MIME-Tools project,\n+\n+\t\t\t\t\t The \"Artistic License\"\n+\n+\t\t\t\t\t\tPreamble\n+\n+\t\tThe intent of this document is to state the conditions under which a\n+\t\tPackage may be copied, such that the Copyright Holder maintains some\n+\t\tsemblance of artistic control over the development of the package,\n+\t\twhile giving the users of the package the right to use and distribute\n+\t\tthe Package in a more-or-less customary fashion, plus the right to make\n+\t\treasonable modifications.\n+\n+\t\tDefinitions:\n+\n+\t\t\t\"Package\" refers to the collection of files distributed by the\n+\t\t\tCopyright Holder, and derivatives of that collection of files\n+\t\t\tcreated through textual modification.\n+\n+\t\t\t\"Standard Version\" refers to such a Package if it has not been\n+\t\t\tmodified, or has been modified in accordance with the wishes\n+\t\t\tof the Copyright Holder as specified below.\n+\n+\t\t\t\"Copyright Holder\" is whoever is named in the copyright or\n+\t\t\tcopyrights for the package.\n+\n+\t\t\t\"You\" is you, if you're thinking about copying or distributing\n+\t\t\tthis Package.\n+\n+\t\t\t\"Reasonable copying fee\" is whatever you can justify on the\n+\t\t\tbasis of media cost, duplication charges, time of people involved,\n+\t\t\tand so on. 
(You will not be required to justify it to the\n+\t\t\tCopyright Holder, but only to the computing community at large\n+\t\t\tas a market that must bear the fee.)\n+\n+\t\t\t\"Freely Available\" means that no fee is charged for the item\n+\t\t\titself, though there may be fees involved in handling the item.\n+\t\t\tIt also means that recipients of the item may redistribute it\n+\t\t\tunder the same conditions they received it.\n+\n+\t\t1. You may make and give away verbatim copies of the source form of the\n+\t\tStandard Version of this Package without restriction, provided that you\n+\t\tduplicate all of the original copyright notices and associated disclaimers.\n+\n+\t\t2. You may apply bug fixes, portability fixes and other modifications\n+\t\tderived from the Public Domain or from the Copyright Holder. A Package\n+\t\tmodified in such a way shall still be considered the Standard Version.\n+\n+\t\t3. You may otherwise modify your copy of this Package in any way, provided\n+\t\tthat you insert a prominent notice in each changed file stating how and\n+\t\twhen you changed that file, and provided that you do at least ONE of the\n+\t\tfollowing:\n+\n+\t\t a) place your modifications in the Public Domain or otherwise make them\n+\t\t Freely Available, such as by posting said modifications to Usenet or\n+\t\t an equivalent medium, or placing the modifications on a major archive\n+\t\t site such as uunet.uu.net, or by allowing the Copyright Holder to include\n+\t\t your modifications in the Standard Version of the Package.\n+\n+\t\t b) use the modified Package only within your corporation or organization.\n+\n+\t\t c) rename any non-standard executables so the names do not conflict\n+\t\t with standard executables, which must also be provided, and provide\n+\t\t a separate manual page for each non-standard executable that clearly\n+\t\t documents how it differs from the Standard Version.\n+\n+\t\t d) make other distribution arrangements with the Copyright Holder.\n+\n+\t\t4. You may distribute the programs of this Package in object code or\n+\t\texecutable form, provided that you do at least ONE of the following:\n+\n+\t\t a) distribute a Standard Version of the executables and library files,\n+\t\t together with instructions (in the manual page or equivalent) on where\n+\t\t to get the Standard Version.\n+\n+\t\t b) accompany the distribution with the machine-readable source of\n+\t\t the Package with your modifications.\n+\n+\t\t c) give non-standard executables non-standard names, and clearly\n+\t\t document the differences in manual pages (or equivalent), together\n+\t\t with instructions on where to get the Standard Version.\n+\n+\t\t d) make other distribution arrangements with the Copyright Holder.\n+\n+\t\t5. You may charge a reasonable copying fee for any distribution of this\n+\t\tPackage. You may charge any fee you choose for support of this\n+\t\tPackage. You may not charge a fee for this Package itself. However,\n+\t\tyou may distribute this Package in aggregate with other (possibly\n+\t\tcommercial) programs as part of a larger (possibly commercial) software\n+\t\tdistribution provided that you do not advertise this Package as a\n+\t\tproduct of your own. You may embed this Package's interpreter within\n+\t\tan executable of yours (by linking); this shall be construed as a mere\n+\t\tform of aggregation, provided that the complete Standard Version of the\n+\t\tinterpreter is so embedded.\n+\n+\t\t6. 
The scripts and library files supplied as input to or produced as\n+\t\toutput from the programs of this Package do not automatically fall\n+\t\tunder the copyright of this Package, but belong to whoever generated\n+\t\tthem, and may be sold commercially, and may be aggregated with this\n+\t\tPackage. If such scripts or library files are aggregated with this\n+\t\tPackage via the so-called \"undump\" or \"unexec\" methods of producing a\n+\t\tbinary executable image, then distribution of such an image shall\n+\t\tneither be construed as a distribution of this Package nor shall it\n+\t\tfall under the restrictions of Paragraphs 3 and 4, provided that you do\n+\t\tnot represent such an executable image as a Standard Version of this\n+\t\tPackage.\n+\n+\t\t7. C subroutines (or comparably compiled subroutines in other\n+\t\tlanguages) supplied by you and linked into this Package in order to\n+\t\temulate subroutines and variables of the language defined by this\n+\t\tPackage shall not be considered part of this Package, but are the\n+\t\tequivalent of input as in Paragraph 6, provided these subroutines do\n+\t\tnot change the language in any way that would cause it to fail the\n+\t\tregression tests for the language.\n+\n+\t\t8. Aggregation of this Package with a commercial distribution is always\n+\t\tpermitted provided that the use of this Package is embedded; that is,\n+\t\twhen no overt attempt is made to make this Package's interfaces visible\n+\t\tto the end user of the commercial distribution. Such use shall not be\n+\t\tconstrued as a distribution of this Package.\n+\n+\t\t9. The name of the Copyright Holder may not be used to endorse or promote\n+\t\tproducts derived from this software without specific prior written permission.\n+\n+\t\t10. THIS PACKAGE IS PROVIDED \"AS IS\" AND WITHOUT ANY EXPRESS OR\n+\t\tIMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED\n+\t\tWARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.\n+\n+\t\t\t\t\t\tThe End\n+\n+", "filename": "plugins/mapper-attachments/licenses/apache-mime4j-LICENSE.txt", "status": "added" }, { "diff": "@@ -0,0 +1,13 @@\n+ =========================================================================\n+ == NOTICE file for use with the Apache License, Version 2.0, ==\n+ =========================================================================\n+ \n+ Apache JAMES Mime4j\n+ Copyright 2004-2010 The Apache Software Foundation\n+ \n+ This product includes software developed at\n+ The Apache Software Foundation (http://www.apache.org/).\n+\n+ This product test suite includes data (mimetools-testmsgs folder) developed \n+ by Eryq and ZeeGee Software Inc as part of the \"MIME-tools\" Perl5 toolkit\n+ and licensed under the Artistic License", "filename": "plugins/mapper-attachments/licenses/apache-mime4j-NOTICE.txt", "status": "added" }, { "diff": "@@ -0,0 +1 @@\n+a81264fe0265ebe8fd1d8128aad06dc320de6eef\n\\ No newline at end of file", "filename": "plugins/mapper-attachments/licenses/apache-mime4j-core-0.7.2.jar.sha1", "status": "added" }, { "diff": "@@ -0,0 +1 @@\n+1c289aa264548a0a1f1b43685a9cb2ab23f67287\n\\ No newline at end of file", "filename": "plugins/mapper-attachments/licenses/apache-mime4j-dom-0.7.2.jar.sha1", "status": "added" }, { "diff": "", "filename": "plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/tika-files.zip", "status": "modified" } ] }
{ "body": "We're upgrading client to elastic 5.1.2 and we noticed that the `max_determinized_states` is no longer accepted by the `query_string` query.\r\n\r\nThe following query is accepted by elastic 2.x:\r\n```\r\ncurl localhost:9200/twitter/_search -d '{\"query\":{\"query_string\":{\"query\":\"test\", \"max_determinized_states\":10}}}'\r\n```\r\n\r\nBut will fail on elastic 5.1.2 with:\r\n```\r\n\"type\":\"parsing_exception\",\"reason\":\"[query_string] query does not support [max_determinized_states]\"\r\n```\r\n\r\nI don't think it's intentional because this option is still referenced in the documentation (and actually very useful).", "comments": [ { "body": "I can have a look in a bit.", "created_at": "2017-01-20T17:48:49Z" }, { "body": "Thanks!", "created_at": "2017-01-20T17:49:31Z" }, { "body": "Done @nomoa. If you want to work around it you can use `max_determined_states`. In 5.3 that'll emit a warning header telling you to use `max_determinized_states` instead. In 6.0 only `max_determinized_states` will be supported.", "created_at": "2017-01-23T17:14:15Z" }, { "body": "Thank you Nik, this is perfect.", "created_at": "2017-01-23T22:36:28Z" } ], "number": 22722, "title": "max_determinized_states in query_string is no longer supported" }
{ "body": "There was a typo in the `ParseField` declaration. I know\r\nwe want to port these parsers to `ObjectParser` eventually\r\nbut I don't have the energy for that today and want to get\r\nthis fixed.\r\n\r\nCloses #22722\r\n", "number": 22749, "review_comments": [ { "body": "When I backport I'll add `max_determined_states` as a deprecated name.", "created_at": "2017-01-23T16:46:27Z" }, { "body": "shouldn't we change the name of the constant too?", "created_at": "2017-01-23T17:32:29Z" }, { "body": "Well caught! I'll change it.", "created_at": "2017-01-23T17:38:43Z" }, { "body": "I pushed 2e399e5505797cf5916208eefb13338a5d6f66c9", "created_at": "2017-01-23T21:53:56Z" } ], "title": "Fix parsing for `max_determinized_states`" }
{ "commits": [ { "message": "Fix parsing for `max_determinized_states`\n\nThere was a typo in the `ParseField` declaration. I know\nwe want to port these parsers to `ObjectParser` eventually\nbut I don't have the energy for that today and want to get\nthis fixed.\n\nCloses #22722" }, { "message": "Breaking changes doc" } ], "files": [ { "diff": "@@ -21,7 +21,6 @@\n \n import org.apache.lucene.queryparser.classic.MapperQueryParser;\n import org.apache.lucene.queryparser.classic.QueryParserSettings;\n-import org.apache.lucene.search.BooleanQuery;\n import org.apache.lucene.search.BoostQuery;\n import org.apache.lucene.search.FuzzyQuery;\n import org.apache.lucene.search.Query;\n@@ -92,7 +91,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue\n private static final ParseField QUOTE_ANALYZER_FIELD = new ParseField(\"quote_analyzer\");\n private static final ParseField ALLOW_LEADING_WILDCARD_FIELD = new ParseField(\"allow_leading_wildcard\");\n private static final ParseField AUTO_GENERATE_PHRASE_QUERIES_FIELD = new ParseField(\"auto_generate_phrase_queries\");\n- private static final ParseField MAX_DETERMINED_STATES_FIELD = new ParseField(\"max_determined_states\");\n+ private static final ParseField MAX_DETERMINED_STATES_FIELD = new ParseField(\"max_determinized_states\");\n private static final ParseField LOWERCASE_EXPANDED_TERMS_FIELD = new ParseField(\"lowercase_expanded_terms\")\n .withAllDeprecated(\"Decision is now made by the analyzer\");\n private static final ParseField ENABLE_POSITION_INCREMENTS_FIELD = new ParseField(\"enable_position_increment\");\n@@ -796,6 +795,8 @@ public static QueryStringQueryBuilder fromXContent(QueryParseContext parseContex\n // ignore, deprecated setting\n } else if (ALL_FIELDS_FIELD.match(currentFieldName)) {\n useAllFields = parser.booleanValue();\n+ } else if (MAX_DETERMINED_STATES_FIELD.match(currentFieldName)) {\n+ maxDeterminizedStates = parser.intValue();\n } else if (TIME_ZONE_FIELD.match(currentFieldName)) {\n try {\n timeZone = parser.text();", "filename": "core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java", "status": "modified" }, { "diff": "@@ -19,13 +19,6 @@\n \n package org.elasticsearch.index.query;\n \n-import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;\n-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBooleanSubQuery;\n-import static org.hamcrest.CoreMatchers.either;\n-import static org.hamcrest.CoreMatchers.equalTo;\n-import static org.hamcrest.Matchers.containsString;\n-import static org.hamcrest.Matchers.instanceOf;\n-\n import org.apache.lucene.analysis.MockSynonymAnalyzer;\n import org.apache.lucene.index.Term;\n import org.apache.lucene.queryparser.classic.MapperQueryParser;\n@@ -52,7 +45,8 @@\n import org.elasticsearch.common.ParsingException;\n import org.elasticsearch.common.lucene.all.AllTermQuery;\n import org.elasticsearch.common.unit.Fuzziness;\n-import org.elasticsearch.index.mapper.MapperService;\n+import org.elasticsearch.common.xcontent.XContentBuilder;\n+import org.elasticsearch.common.xcontent.json.JsonXContent;\n import org.elasticsearch.search.internal.SearchContext;\n import org.elasticsearch.test.AbstractQueryTestCase;\n import org.hamcrest.Matchers;\n@@ -63,6 +57,13 @@\n import java.util.Collections;\n import java.util.List;\n \n+import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;\n+import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBooleanSubQuery;\n+import static org.hamcrest.CoreMatchers.either;\n+import static org.hamcrest.CoreMatchers.equalTo;\n+import static org.hamcrest.Matchers.containsString;\n+import static org.hamcrest.Matchers.instanceOf;\n+\n public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStringQueryBuilder> {\n \n @Override\n@@ -511,6 +512,29 @@ public void testToQueryRegExpQueryTooComplex() throws Exception {\n assertThat(e.getMessage(), containsString(\"would result in more than 10000 states\"));\n }\n \n+ /**\n+ * Validates that {@code max_determinized_states} can be parsed and lowers the allowed number of determinized states.\n+ */\n+ public void testToQueryRegExpQueryMaxDeterminizedStatesParsing() throws Exception {\n+ assumeTrue(\"test runs only when at least a type is registered\", getCurrentTypes().length > 0);\n+ XContentBuilder builder = JsonXContent.contentBuilder();\n+ builder.startObject(); {\n+ builder.startObject(\"query_string\"); {\n+ builder.field(\"query\", \"/[ac]*a[ac]{1,10}/\");\n+ builder.field(\"default_field\", STRING_FIELD_NAME);\n+ builder.field(\"max_determinized_states\", 10);\n+ }\n+ builder.endObject();\n+ }\n+ builder.endObject();\n+\n+ QueryBuilder queryBuilder = new QueryParseContext(createParser(builder)).parseInnerQueryBuilder();\n+ TooComplexToDeterminizeException e = expectThrows(TooComplexToDeterminizeException.class,\n+ () -> queryBuilder.toQuery(createShardContext()));\n+ assertThat(e.getMessage(), containsString(\"Determinizing [ac]*\"));\n+ assertThat(e.getMessage(), containsString(\"would result in more than 10 states\"));\n+ }\n+\n public void testToQueryFuzzyQueryAutoFuzziness() throws Exception {\n assumeTrue(\"test runs only when at least a type is registered\", getCurrentTypes().length > 0);\n \n@@ -784,7 +808,7 @@ public void testFromJson() throws IOException {\n \" \\\"tie_breaker\\\" : 0.0,\\n\" +\n \" \\\"default_operator\\\" : \\\"or\\\",\\n\" +\n \" \\\"auto_generate_phrase_queries\\\" : false,\\n\" +\n- \" \\\"max_determined_states\\\" : 10000,\\n\" +\n+ \" \\\"max_determinized_states\\\" : 10000,\\n\" +\n \" \\\"enable_position_increment\\\" : true,\\n\" +\n \" \\\"fuzziness\\\" : \\\"AUTO\\\",\\n\" +\n \" \\\"fuzzy_prefix_length\\\" : 0,\\n\" +", "filename": "core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -29,6 +29,12 @@\n * The deprecated `minimum_number_should_match` parameter in the `bool` query has\n been removed, use `minimum_should_match` instead.\n \n+* The `query_string` query now correctly parses the maximum number of\n+ states allowed when\n+ \"https://en.wikipedia.org/wiki/Powerset_construction#Complexity[determinizing]\"\n+ a regex as `max_determinized_states` instead of the typo\n+ `max_determined_states`.\n+\n ==== Search shards API\n \n The search shards API no longer accepts the `type` url parameter, which didn't", "filename": "docs/reference/migration/migrate_6_0/search.asciidoc", "status": "modified" } ] }
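The review comments in the record above mention keeping `max_determined_states` as a deprecated alias when backporting. The following self-contained Java sketch illustrates that accept-both-spellings idea under stated assumptions: `FieldAlias` and its methods are hypothetical names for illustration only, not the actual `ParseField` implementation.

```java
import java.util.Arrays;
import java.util.List;

/**
 * Standalone illustration of accepting a corrected parameter name while still
 * tolerating a deprecated spelling. Hypothetical class, not Elasticsearch code.
 */
public class FieldAlias {
    private final String canonicalName;
    private final List<String> deprecatedNames;

    public FieldAlias(String canonicalName, String... deprecatedNames) {
        this.canonicalName = canonicalName;
        this.deprecatedNames = Arrays.asList(deprecatedNames);
    }

    /** Returns true if the given field name matches, warning on deprecated spellings. */
    public boolean match(String fieldName) {
        if (canonicalName.equals(fieldName)) {
            return true;
        }
        if (deprecatedNames.contains(fieldName)) {
            System.err.println("Deprecated field [" + fieldName + "] used, expected [" + canonicalName + "]");
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        FieldAlias maxDeterminizedStates = new FieldAlias("max_determinized_states", "max_determined_states");
        System.out.println(maxDeterminizedStates.match("max_determinized_states")); // true, no warning
        System.out.println(maxDeterminizedStates.match("max_determined_states"));   // true, warning on stderr
        System.out.println(maxDeterminizedStates.match("max_states"));              // false
    }
}
```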
{ "body": "**Elasticsearch version**:\r\n`\"cluster_name\": \"elasticsearch\",\r\n\"version\": {\r\n\"number\": \"2.4.0\",\r\n\"build_hash\": \"ce9f0c7394dee074091dd1bc4e9469251181fc55\",\r\n\"build_timestamp\": \"2016-08-29T09:14:17Z\",\r\n\"build_snapshot\": false,\r\n\"lucene_version\": \"5.5.2\"\r\n},\r\n\"tagline\": \"You Know, for Search\"`\r\n\r\n**Plugins installed**: []\r\n\r\n**JVM version**:\r\n\r\n**OS version**:\r\n\r\n**Description of the problem including expected versus actual behavior**:\r\nStructure of my query is following:\r\n\r\n```\r\n{\r\n \"query\": {\r\n \"function_score\": {\r\n \"query\": {\r\n \"bool\": {\r\n \"disable_coord\": \"true\",\r\n \"should\": [\r\n ... some match_phrase clauses....\r\n ],\r\n }\r\n },\r\n \"functions\": [\r\n {\"script_score\": {\"script\": \"_score\"}, \"weight\": 0.8},\r\n {\"exp\": { \"datefield\": { \"offset\": \"1d\", \"scale\": \"24w\", \"decay\": 0.5 } } , \"weight\": 0.2}\r\n ],\r\n \"score_mode\": \"avg\",\r\n \"boost_mode\": \"replace\"\r\n}\r\n```\r\n\r\nHere's what I am trying to do:\r\n\r\nFinal score needs to be = 0.9 * (_score returned by bool \"relevance\" query) + 0.1 * (_score returned by exp function)/(0.9 + 0.1)\r\n\r\nHowever, the \"replace\" boost ends up mucking up with original _score of the query which is clearly not expected. \r\n\r\nI tried using \"sum\" and \"multiply\" and both of these work in that:\r\n\r\nfor \"sum\"\r\nfinal score = _score + (0.9*_score + 0.1 * score returned by exp)\r\n\r\nfor \"multiply\"\r\nfinal score = _score * (0.9 *_score + 0.1 * score returned by exp)\r\n\r\nAny ideas? \r\n", "comments": [ { "body": "Looks like the problem is that specifying a `weight` for `script_score` returns zero instead of being applied to the score returned from the script.\n\nSo this works as you'd expect:\n\n```\nGET t/_search\n{\n \"query\": {\n \"function_score\": {\n \"query\": {\n \"constant_score\": {\n \"filter\": {\n \"match_all\": {}\n },\n \"boost\": 2\n }\n },\n \"functions\": [\n {\n \"script_score\": {\n \"script\": \"_score * 0.8\"\n }\n },\n {\n \"field_value_factor\": {\n \"field\": \"num\"\n },\n \"weight\": 0.2\n }\n ],\n \"score_mode\": \"sum\",\n \"boost_mode\": \"replace\"\n }\n }\n}\n```\n\nHowever, this doesn't:\n\n```\nGET t/_search\n{\n \"query\": {\n \"function_score\": {\n \"query\": {\n \"constant_score\": {\n \"filter\": {\n \"match_all\": {}\n },\n \"boost\": 2\n }\n },\n \"functions\": [\n {\n \"script_score\": {\n \"script\": \"_score\"\n },\n \"weight\": 0.8\n },\n {\n \"field_value_factor\": {\n \"field\": \"num\"\n },\n \"weight\": 0.2\n }\n ],\n \"score_mode\": \"sum\",\n \"boost_mode\": \"replace\"\n }\n }\n}\n```\n", "created_at": "2016-11-19T16:14:20Z" } ], "number": 21483, "title": "Script score doesn't support weight correctly" }
{ "body": "The weight factor function does not check if the delegate score function needs to access the score of the query.\r\nThis results in a _score equals to 0 for all score function that set a weight.\r\nThis change modifies the WeightFactorFunction#needsScore to delegate the call to its underlying score function.\r\n\r\nFix #21483", "number": 22713, "review_comments": [], "title": "Fix script score function that combines _score and weight" }
{ "commits": [ { "message": "Fix script score function that combines _score and weight\n\nThe weight factor function does not check if the delegate score function needs to access the score of the query.\nThis results in a _score equals to 0 for all score function that set a weight.\nThis change modifies the WeightFactorFunction#needsScore to delegate the call to its underlying score function.\n\nFix #21483" } ], "files": [ { "diff": "@@ -68,7 +68,7 @@ public Explanation explainScore(int docId, Explanation subQueryScore) throws IOE\n \n @Override\n public boolean needsScores() {\n- return false;\n+ return scoreFunction.needsScores();\n }\n \n public Explanation explainWeight() {", "filename": "core/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java", "status": "modified" }, { "diff": "@@ -751,6 +751,33 @@ public void testExplanationAndScoreEqualsEvenIfNoFunctionMatches() throws IOExce\n assertThat(searchResult.scoreDocs[0].score, equalTo(explanation.getValue()));\n }\n \n+ public void testWeightFactorNeedsScore() {\n+ for (boolean needsScore : new boolean[] {true, false}) {\n+ WeightFactorFunction function = new WeightFactorFunction(10.0f, new ScoreFunction(CombineFunction.REPLACE) {\n+ @Override\n+ public LeafScoreFunction getLeafScoreFunction(LeafReaderContext ctx) throws IOException {\n+ return null;\n+ }\n+\n+ @Override\n+ public boolean needsScores() {\n+ return needsScore;\n+ }\n+\n+ @Override\n+ protected boolean doEquals(ScoreFunction other) {\n+ return false;\n+ }\n+\n+ @Override\n+ protected int doHashCode() {\n+ return 0;\n+ }\n+ });\n+ assertEquals(needsScore, function.needsScores());\n+ }\n+ }\n+\n private static class DummyScoreFunction extends ScoreFunction {\n protected DummyScoreFunction(CombineFunction scoreCombiner) {\n super(scoreCombiner);", "filename": "core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java", "status": "modified" } ] }
{ "body": "When an `ElasticsearchException` is bubbled up to HTTP, there are additional headers being returned.\n\n``` bash\n# curl -X DELETE localhost:9200/test\n{\"acknowledged\":true}\n\n# curl -X PUT localhost:9200/test -v\n* Trying 127.0.0.1...\n* Connected to localhost (127.0.0.1) port 9200 (#0)\n> PUT /test HTTP/1.1\n> Host: localhost:9200\n> User-Agent: curl/7.43.0\n> Accept: */*\n>\n< HTTP/1.1 200 OK\n< Content-Type: application/json; charset=UTF-8\n< Content-Length: 21\n<\n* Connection #0 to host localhost left intact\n{\"acknowledged\":true}\n\n\n# curl -X PUT localhost:9200/test -v\n* Trying 127.0.0.1...\n* Connected to localhost (127.0.0.1) port 9200 (#0)\n> PUT /test HTTP/1.1\n> Host: localhost:9200\n> User-Agent: curl/7.43.0\n> Accept: */*\n>\n< HTTP/1.1 400 Bad Request\n< es.index: test\n< Content-Type: application/json; charset=UTF-8\n< Content-Length: 203\n<\n* Connection #0 to host localhost left intact\n{\"error\":{\"root_cause\":[{\"type\":\"index_already_exists_exception\",\"reason\":\"already exists\",\"index\":\"test\"}],\"type\":\"index_already_exists_exception\",\"reason\":\"already exists\",\"index\":\"test\"},\"status\":400}\n```\n\nIf those headers are not needed on HTTP, maybe it does not make any sense to return them? All the information looks available to me in the HTTP response body, but I am possibly missing some cases.\n", "comments": [ { "body": "@clintongormley do you know if this feature is used by the clients?\n", "created_at": "2016-04-08T09:28:09Z" }, { "body": "@jpountz i'm unaware of anybody using this feature.\n", "created_at": "2016-04-12T14:12:02Z" }, { "body": "Let's remove these headers\n", "created_at": "2016-06-17T09:54:06Z" }, { "body": "These headers are printed out as part of the response body already, I don't think there's any need to send them back as response headers too. They were introduced to prevent having to add custom exceptions to our codebase whenever we need to throw an exception that has to hold some additional metadata that ElasticsearchException doesn't support. Maybe the headers naming should be changed as well to something like metadata? @s1monw @tlrx what do you think?", "created_at": "2017-01-18T13:56:02Z" } ], "number": 17593, "title": "Stop returning additional HTTP headers on exceptions" }
{ "body": "Move \"es.\" internal headers to separate metadata set in `ElasticsearchException` and stop returning them as response headers. These headers are printed out as part of the response body already, they weren't meant to be sent back as response headers too. They were introduced to prevent having to add custom exceptions to our codebase whenever we need to throw an exception that has to hold some additional metadata that `ElasticsearchException` doesn't support out of the box. The header notion stays in `ElasticsearchException` but only for what actually needs to be returned as response header (no \"es.\" prefix).\r\n\r\nAlso removed `ESExceptionTests` and moved its tests under `ElasticsearchExceptionTests` or `ExceptionSerializationTests`\r\n\r\nCloses #17593\r\n", "number": 22703, "review_comments": [ { "body": "Maybe remove \"instead\" from the exception message?", "created_at": "2017-01-20T10:31:53Z" }, { "body": "Yes, I changed it in #22675", "created_at": "2017-01-20T10:43:24Z" }, { "body": "I think it's OK here because the token is a value, not an array. But later in the loop we need to take care of arrays.\r\n\r\nI think we can already do it for values and just ignore objects for now", "created_at": "2017-01-20T10:44:39Z" }, { "body": "this line is duplicated", "created_at": "2017-01-20T10:57:47Z" }, { "body": "ok I will remove the TODO then", "created_at": "2017-01-23T10:41:14Z" }, { "body": "Are we testing this somewhere?", "created_at": "2017-01-23T10:42:26Z" }, { "body": "good catch", "created_at": "2017-01-23T10:43:02Z" } ], "title": "Stop returning \"es.\" internal exception headers as http response headers" }
{ "commits": [ { "message": "move \"es.\" internal headers to separate metadata set in ElasticsearchException and stop returning them as response headers\n\nCloses #17593" }, { "message": "[TEST] remove ESExceptionTests, move its methods to ElasticsearchExceptionTests or ExceptionSerializationTests" }, { "message": "address some review comments" }, { "message": "update TODO on parsing exceptions back with a potential plan" } ], "files": [ { "diff": "@@ -161,7 +161,6 @@\n <suppress files=\"core[/\\\\]src[/\\\\]main[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]action[/\\\\]ingest[/\\\\]SimulatePipelineRequestBuilder.java\" checks=\"LineLength\" />\n <suppress files=\"core[/\\\\]src[/\\\\]main[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]action[/\\\\]ingest[/\\\\]SimulatePipelineTransportAction.java\" checks=\"LineLength\" />\n <suppress files=\"core[/\\\\]src[/\\\\]main[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]action[/\\\\]search[/\\\\]MultiSearchRequestBuilder.java\" checks=\"LineLength\" />\n- <suppress files=\"core[/\\\\]src[/\\\\]main[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]action[/\\\\]search[/\\\\]SearchPhaseExecutionException.java\" checks=\"LineLength\" />\n <suppress files=\"core[/\\\\]src[/\\\\]main[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]action[/\\\\]search[/\\\\]SearchResponse.java\" checks=\"LineLength\" />\n <suppress files=\"core[/\\\\]src[/\\\\]main[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]action[/\\\\]search[/\\\\]ShardSearchFailure.java\" checks=\"LineLength\" />\n <suppress files=\"core[/\\\\]src[/\\\\]main[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]action[/\\\\]search[/\\\\]TransportClearScrollAction.java\" checks=\"LineLength\" />\n@@ -533,7 +532,6 @@\n <suppress files=\"core[/\\\\]src[/\\\\]main[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]threadpool[/\\\\]ThreadPool.java\" checks=\"LineLength\" />\n <suppress files=\"core[/\\\\]src[/\\\\]test[/\\\\]java[/\\\\]org[/\\\\]apache[/\\\\]lucene[/\\\\]queries[/\\\\]BlendedTermQueryTests.java\" checks=\"LineLength\" />\n <suppress files=\"core[/\\\\]src[/\\\\]test[/\\\\]java[/\\\\]org[/\\\\]apache[/\\\\]lucene[/\\\\]search[/\\\\]postingshighlight[/\\\\]CustomPostingsHighlighterTests.java\" checks=\"LineLength\" />\n- <suppress files=\"core[/\\\\]src[/\\\\]test[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]ESExceptionTests.java\" checks=\"LineLength\" />\n <suppress files=\"core[/\\\\]src[/\\\\]test[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]NamingConventionTests.java\" checks=\"LineLength\" />\n <suppress files=\"core[/\\\\]src[/\\\\]test[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]VersionTests.java\" checks=\"LineLength\" />\n <suppress files=\"core[/\\\\]src[/\\\\]test[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]action[/\\\\]RejectionActionIT.java\" checks=\"LineLength\" />", "filename": "buildSrc/src/main/resources/checkstyle_suppressions.xml", "status": "modified" }, { "diff": "@@ -37,8 +37,9 @@\n \n import java.io.IOException;\n import java.util.Arrays;\n+import java.util.Collections;\n import java.util.HashMap;\n-import java.util.HashSet;\n+import java.util.Iterator;\n import java.util.List;\n import java.util.Map;\n import java.util.Set;\n@@ -56,14 +57,14 @@\n */\n public class ElasticsearchException extends RuntimeException implements ToXContent, Writeable {\n \n- static final Version UNKNOWN_VERSION_ADDED = Version.fromId(0);\n+ private static final Version UNKNOWN_VERSION_ADDED = Version.fromId(0);\n \n /**\n * Passed in the {@link Params} of {@link 
#generateThrowableXContent(XContentBuilder, Params, Throwable)}\n * to control if the {@code caused_by} element should render. Unlike most parameters to {@code toXContent} methods this parameter is\n * internal only and not available as a URL parameter.\n */\n- public static final String REST_EXCEPTION_SKIP_CAUSE = \"rest.exception.cause.skip\";\n+ private static final String REST_EXCEPTION_SKIP_CAUSE = \"rest.exception.cause.skip\";\n /**\n * Passed in the {@link Params} of {@link #generateThrowableXContent(XContentBuilder, Params, Throwable)}\n * to control if the {@code stack_trace} element should render. Unlike most parameters to {@code toXContent} methods this parameter is\n@@ -72,11 +73,11 @@ public class ElasticsearchException extends RuntimeException implements ToXConte\n public static final String REST_EXCEPTION_SKIP_STACK_TRACE = \"rest.exception.stacktrace.skip\";\n public static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT = true;\n private static final boolean REST_EXCEPTION_SKIP_CAUSE_DEFAULT = false;\n- private static final String INDEX_HEADER_KEY = \"es.index\";\n- private static final String INDEX_HEADER_KEY_UUID = \"es.index_uuid\";\n- private static final String SHARD_HEADER_KEY = \"es.shard\";\n- private static final String RESOURCE_HEADER_TYPE_KEY = \"es.resource.type\";\n- private static final String RESOURCE_HEADER_ID_KEY = \"es.resource.id\";\n+ private static final String INDEX_METADATA_KEY = \"es.index\";\n+ private static final String INDEX_METADATA_KEY_UUID = \"es.index_uuid\";\n+ private static final String SHARD_METADATA_KEY = \"es.shard\";\n+ private static final String RESOURCE_METADATA_TYPE_KEY = \"es.resource.type\";\n+ private static final String RESOURCE_METADATA_ID_KEY = \"es.resource.id\";\n \n private static final String TYPE = \"type\";\n private static final String REASON = \"reason\";\n@@ -88,6 +89,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte\n \n private static final Map<Integer, CheckedFunction<StreamInput, ? extends ElasticsearchException, IOException>> ID_TO_SUPPLIER;\n private static final Map<Class<? extends ElasticsearchException>, ElasticsearchExceptionHandle> CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE;\n+ private final Map<String, List<String>> metadata = new HashMap<>();\n private final Map<String, List<String>> headers = new HashMap<>();\n \n /**\n@@ -129,24 +131,78 @@ public ElasticsearchException(StreamInput in) throws IOException {\n super(in.readOptionalString(), in.readException());\n readStackTrace(this, in);\n headers.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString));\n+ //TODO change to onOrAfter once backported to 5.x\n+ if (in.getVersion().after(Version.V_5_3_0_UNRELEASED)) {\n+ metadata.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString));\n+ } else {\n+ for (Iterator<Map.Entry<String, List<String>>> iterator = headers.entrySet().iterator(); iterator.hasNext(); ) {\n+ Map.Entry<String, List<String>> header = iterator.next();\n+ if (header.getKey().startsWith(\"es.\")) {\n+ metadata.put(header.getKey(), header.getValue());\n+ iterator.remove();\n+ }\n+ }\n+ }\n }\n \n /**\n- * Adds a new header with the given key.\n- * This method will replace existing header if a header with the same key already exists\n+ * Adds a new piece of metadata with the given key.\n+ * If the provided key is already present, the corresponding metadata will be replaced\n */\n- public void addHeader(String key, String... 
value) {\n- this.headers.put(key, Arrays.asList(value));\n+ public void addMetadata(String key, String... values) {\n+ addMetadata(key, Arrays.asList(values));\n+ }\n+\n+ /**\n+ * Adds a new piece of metadata with the given key.\n+ * If the provided key is already present, the corresponding metadata will be replaced\n+ */\n+ public void addMetadata(String key, List<String> values) {\n+ //we need to enforce this otherwise bw comp doesn't work properly, as \"es.\" was the previous criteria to split headers in two sets\n+ if (key.startsWith(\"es.\") == false) {\n+ throw new IllegalArgumentException(\"exception metadata must start with [es.], found [\" + key + \"] instead\");\n+ }\n+ this.metadata.put(key, values);\n+ }\n+\n+ /**\n+ * Returns a set of all metadata keys on this exception\n+ */\n+ public Set<String> getMetadataKeys() {\n+ return metadata.keySet();\n+ }\n+\n+ /**\n+ * Returns the list of metadata values for the given key or {@code null} if no metadata for the\n+ * given key exists.\n+ */\n+ public List<String> getMetadata(String key) {\n+ return metadata.get(key);\n+ }\n+\n+ protected Map<String, List<String>> getMetadata() {\n+ return metadata;\n }\n \n /**\n * Adds a new header with the given key.\n * This method will replace existing header if a header with the same key already exists\n */\n public void addHeader(String key, List<String> value) {\n+ //we need to enforce this otherwise bw comp doesn't work properly, as \"es.\" was the previous criteria to split headers in two sets\n+ if (key.startsWith(\"es.\")) {\n+ throw new IllegalArgumentException(\"exception headers must not start with [es.], found [\" + key + \"] instead\");\n+ }\n this.headers.put(key, value);\n }\n \n+ /**\n+ * Adds a new header with the given key.\n+ * This method will replace existing header if a header with the same key already exists\n+ */\n+ public void addHeader(String key, String... 
value) {\n+ addHeader(key, Arrays.asList(value));\n+ }\n \n /**\n * Returns a set of all header keys on this exception\n@@ -156,7 +212,7 @@ public Set<String> getHeaderKeys() {\n }\n \n /**\n- * Returns the list of header values for the given key or {@code null} if not header for the\n+ * Returns the list of header values for the given key or {@code null} if no header for the\n * given key exists.\n */\n public List<String> getHeader(String key) {\n@@ -227,7 +283,16 @@ public void writeTo(StreamOutput out) throws IOException {\n out.writeOptionalString(this.getMessage());\n out.writeException(this.getCause());\n writeStackTraces(this, out);\n- out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString);\n+ //TODO change to onOrAfter once backported to 5.x\n+ if (out.getVersion().after(Version.V_5_3_0_UNRELEASED)) {\n+ out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString);\n+ out.writeMapOfLists(metadata, StreamOutput::writeString, StreamOutput::writeString);\n+ } else {\n+ HashMap<String, List<String>> finalHeaders = new HashMap<>(headers.size() + metadata.size());\n+ finalHeaders.putAll(headers);\n+ finalHeaders.putAll(metadata);\n+ out.writeMapOfLists(finalHeaders, StreamOutput::writeString, StreamOutput::writeString);\n+ }\n }\n \n public static ElasticsearchException readException(StreamInput input, int id) throws IOException {\n@@ -266,24 +331,19 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n if (ex != this) {\n generateThrowableXContent(builder, params, this);\n } else {\n- innerToXContent(builder, params, this, getExceptionName(), getMessage(), headers, getCause());\n+ innerToXContent(builder, params, this, getExceptionName(), getMessage(), headers, metadata, getCause());\n }\n return builder;\n }\n \n protected static void innerToXContent(XContentBuilder builder, Params params,\n Throwable throwable, String type, String message, Map<String, List<String>> headers,\n- Throwable cause) throws IOException {\n+ Map<String, List<String>> metadata, Throwable cause) throws IOException {\n builder.field(TYPE, type);\n builder.field(REASON, message);\n \n- Set<String> customHeaders = new HashSet<>();\n- for (String key : headers.keySet()) {\n- if (key.startsWith(\"es.\")) {\n- headerToXContent(builder, key.substring(\"es.\".length()), headers.get(key));\n- } else {\n- customHeaders.add(key);\n- }\n+ for (Map.Entry<String, List<String>> entry : metadata.entrySet()) {\n+ headerToXContent(builder, entry.getKey().substring(\"es.\".length()), entry.getValue());\n }\n \n if (throwable instanceof ElasticsearchException) {\n@@ -300,10 +360,10 @@ protected static void innerToXContent(XContentBuilder builder, Params params,\n }\n }\n \n- if (customHeaders.isEmpty() == false) {\n+ if (headers.isEmpty() == false) {\n builder.startObject(HEADER);\n- for (String header : customHeaders) {\n- headerToXContent(builder, header, headers.get(header));\n+ for (Map.Entry<String, List<String>> entry : headers.entrySet()) {\n+ headerToXContent(builder, entry.getKey(), entry.getValue());\n }\n builder.endObject();\n }\n@@ -336,7 +396,7 @@ protected void metadataToXContent(XContentBuilder builder, Params params) throws\n /**\n * Static toXContent helper method that renders {@link org.elasticsearch.ElasticsearchException} or {@link Throwable} instances\n * as XContent, delegating the rendering to {@link #toXContent(XContentBuilder, Params)}\n- * or {@link #innerToXContent(XContentBuilder, Params, Throwable, String, String, Map, 
Throwable)}.\n+ * or {@link #innerToXContent(XContentBuilder, Params, Throwable, String, String, Map, Map, Throwable)}.\n *\n * This method is usually used when the {@link Throwable} is rendered as a part of another XContent object.\n */\n@@ -346,7 +406,7 @@ public static void generateThrowableXContent(XContentBuilder builder, Params par\n if (t instanceof ElasticsearchException) {\n ((ElasticsearchException) t).toXContent(builder, params);\n } else {\n- innerToXContent(builder, params, t, getExceptionName(t), t.getMessage(), emptyMap(), t.getCause());\n+ innerToXContent(builder, params, t, getExceptionName(t), t.getMessage(), emptyMap(), emptyMap(), t.getCause());\n }\n }\n \n@@ -410,6 +470,7 @@ public static ElasticsearchException fromXContent(XContentParser parser) throws\n \n String type = null, reason = null, stack = null;\n ElasticsearchException cause = null;\n+ Map<String, List<String>> metadata = new HashMap<>();\n Map<String, Object> headers = new HashMap<>();\n \n do {\n@@ -423,8 +484,7 @@ public static ElasticsearchException fromXContent(XContentParser parser) throws\n } else if (STACK_TRACE.equals(currentFieldName)) {\n stack = parser.text();\n } else {\n- // Everything else is considered as a header\n- headers.put(currentFieldName, parser.text());\n+ metadata.put(currentFieldName, Collections.singletonList(parser.text()));\n }\n } else if (token == XContentParser.Token.START_OBJECT) {\n if (CAUSED_BY.equals(currentFieldName)) {\n@@ -446,6 +506,16 @@ public static ElasticsearchException fromXContent(XContentParser parser) throws\n message.append(']');\n \n ElasticsearchException e = new ElasticsearchException(message.toString(), cause);\n+\n+ for (Map.Entry<String, List<String>> entry : metadata.entrySet()) {\n+ //subclasses can print out additional metadata through the metadataToXContent method. Simple key-value pairs will be\n+ //parsed back and become part of this metadata set, while objects and arrays are not supported when parsing back.\n+ //Those key-value pairs become part of the metadata set and inherit the \"es.\" prefix as that is currently required\n+ //by addMetadata. 
The prefix will get stripped out when printing metadata out so it will be effectively invisible.\n+ //TODO move subclasses that print out simple metadata to using addMetadata directly and support also numbers and booleans.\n+ //TODO rename metadataToXContent and have only SearchPhaseExecutionException use it, which prints out complex objects\n+ e.addMetadata(\"es.\" + entry.getKey(), entry.getValue());\n+ }\n for (Map.Entry<String, Object> header : headers.entrySet()) {\n e.addHeader(header.getKey(), String.valueOf(header.getValue()));\n }\n@@ -500,9 +570,9 @@ public static String getExceptionName(Throwable ex) {\n @Override\n public String toString() {\n StringBuilder builder = new StringBuilder();\n- if (headers.containsKey(INDEX_HEADER_KEY)) {\n+ if (metadata.containsKey(INDEX_METADATA_KEY)) {\n builder.append(getIndex());\n- if (headers.containsKey(SHARD_HEADER_KEY)) {\n+ if (metadata.containsKey(SHARD_METADATA_KEY)) {\n builder.append('[').append(getShardId()).append(']');\n }\n builder.append(' ');\n@@ -863,17 +933,17 @@ <E extends ElasticsearchException> ElasticsearchExceptionHandle(Class<E> excepti\n }\n \n public Index getIndex() {\n- List<String> index = getHeader(INDEX_HEADER_KEY);\n+ List<String> index = getMetadata(INDEX_METADATA_KEY);\n if (index != null && index.isEmpty() == false) {\n- List<String> index_uuid = getHeader(INDEX_HEADER_KEY_UUID);\n+ List<String> index_uuid = getMetadata(INDEX_METADATA_KEY_UUID);\n return new Index(index.get(0), index_uuid.get(0));\n }\n \n return null;\n }\n \n public ShardId getShardId() {\n- List<String> shard = getHeader(SHARD_HEADER_KEY);\n+ List<String> shard = getMetadata(SHARD_METADATA_KEY);\n if (shard != null && shard.isEmpty() == false) {\n return new ShardId(getIndex(), Integer.parseInt(shard.get(0)));\n }\n@@ -882,8 +952,8 @@ public ShardId getShardId() {\n \n public void setIndex(Index index) {\n if (index != null) {\n- addHeader(INDEX_HEADER_KEY, index.getName());\n- addHeader(INDEX_HEADER_KEY_UUID, index.getUUID());\n+ addMetadata(INDEX_METADATA_KEY, index.getName());\n+ addMetadata(INDEX_METADATA_KEY_UUID, index.getUUID());\n }\n }\n \n@@ -896,27 +966,22 @@ public void setIndex(String index) {\n public void setShard(ShardId shardId) {\n if (shardId != null) {\n setIndex(shardId.getIndex());\n- addHeader(SHARD_HEADER_KEY, Integer.toString(shardId.id()));\n+ addMetadata(SHARD_METADATA_KEY, Integer.toString(shardId.id()));\n }\n }\n \n- public void setShard(String index, int shardId) {\n- setIndex(index);\n- addHeader(SHARD_HEADER_KEY, Integer.toString(shardId));\n- }\n-\n public void setResources(String type, String... 
id) {\n assert type != null;\n- addHeader(RESOURCE_HEADER_ID_KEY, id);\n- addHeader(RESOURCE_HEADER_TYPE_KEY, type);\n+ addMetadata(RESOURCE_METADATA_ID_KEY, id);\n+ addMetadata(RESOURCE_METADATA_TYPE_KEY, type);\n }\n \n public List<String> getResourceId() {\n- return getHeader(RESOURCE_HEADER_ID_KEY);\n+ return getMetadata(RESOURCE_METADATA_ID_KEY);\n }\n \n public String getResourceType() {\n- List<String> header = getHeader(RESOURCE_HEADER_TYPE_KEY);\n+ List<String> header = getMetadata(RESOURCE_METADATA_TYPE_KEY);\n if (header != null && header.isEmpty() == false) {\n assert header.size() == 1;\n return header.get(0);", "filename": "core/src/main/java/org/elasticsearch/ElasticsearchException.java", "status": "modified" }, { "diff": "@@ -138,7 +138,8 @@ protected void metadataToXContent(XContentBuilder builder, Params params) throws\n builder.field(\"grouped\", group); // notify that it's grouped\n builder.field(\"failed_shards\");\n builder.startArray();\n- ShardOperationFailedException[] failures = params.paramAsBoolean(\"group_shard_failures\", true) ? ExceptionsHelper.groupBy(shardFailures) : shardFailures;\n+ ShardOperationFailedException[] failures = params.paramAsBoolean(\"group_shard_failures\", true) ?\n+ ExceptionsHelper.groupBy(shardFailures) : shardFailures;\n for (ShardOperationFailedException failure : failures) {\n builder.startObject();\n failure.toXContent(builder, params);\n@@ -156,7 +157,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n // We don't have a cause when all shards failed, but we do have shards failures so we can \"guess\" a cause\n // (see {@link #getCause()}). Here, we use super.getCause() because we don't want the guessed exception to\n // be rendered twice (one in the \"cause\" field, one in \"failed_shards\")\n- innerToXContent(builder, params, this, getExceptionName(), getMessage(), getHeaders(), super.getCause());\n+ innerToXContent(builder, params, this, getExceptionName(), getMessage(), getHeaders(), getMetadata(), super.getCause());\n }\n return builder;\n }", "filename": "core/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java", "status": "modified" }, { "diff": "@@ -438,7 +438,6 @@ private void onShardOperation(final NodeRequest request, final Object[] shardRes\n } catch (Exception e) {\n BroadcastShardOperationFailedException failure =\n new BroadcastShardOperationFailedException(shardRouting.shardId(), \"operation \" + actionName + \" failed\", e);\n- failure.setIndex(shardRouting.getIndexName());\n failure.setShard(shardRouting.shardId());\n shardResults[shardIndex] = failure;\n if (TransportActions.isShardNotAvailableException(e)) {", "filename": "core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java", "status": "modified" }, { "diff": "@@ -38,8 +38,7 @@ public final class NotSerializableExceptionWrapper extends ElasticsearchExceptio\n private final RestStatus status;\n \n public NotSerializableExceptionWrapper(Throwable other) {\n- super(ElasticsearchException.getExceptionName(other) +\n- \": \" + other.getMessage(), other.getCause());\n+ super(ElasticsearchException.getExceptionName(other) + \": \" + other.getMessage(), other.getCause());\n this.name = ElasticsearchException.getExceptionName(other);\n this.status = ExceptionsHelper.status(other);\n setStackTrace(other.getStackTrace());\n@@ -51,6 +50,9 @@ public NotSerializableExceptionWrapper(Throwable other) {\n for (String key : ex.getHeaderKeys()) {\n 
this.addHeader(key, ex.getHeader(key));\n }\n+ for (String key : ex.getMetadataKeys()) {\n+ this.addMetadata(key, ex.getMetadata(key));\n+ }\n }\n }\n ", "filename": "core/src/main/java/org/elasticsearch/common/io/stream/NotSerializableExceptionWrapper.java", "status": "modified" }, { "diff": "@@ -21,73 +21,338 @@\n \n import org.apache.lucene.util.Constants;\n import org.elasticsearch.action.RoutingMissingException;\n+import org.elasticsearch.action.search.SearchPhaseExecutionException;\n+import org.elasticsearch.action.search.ShardSearchFailure;\n import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;\n import org.elasticsearch.cluster.block.ClusterBlockException;\n+import org.elasticsearch.common.ParsingException;\n+import org.elasticsearch.common.Strings;\n+import org.elasticsearch.common.bytes.BytesArray;\n+import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.xcontent.ToXContent;\n import org.elasticsearch.common.xcontent.XContent;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n+import org.elasticsearch.common.xcontent.XContentFactory;\n+import org.elasticsearch.common.xcontent.XContentHelper;\n+import org.elasticsearch.common.xcontent.XContentLocation;\n import org.elasticsearch.common.xcontent.XContentParser;\n import org.elasticsearch.common.xcontent.XContentType;\n import org.elasticsearch.discovery.DiscoverySettings;\n+import org.elasticsearch.index.Index;\n+import org.elasticsearch.index.IndexNotFoundException;\n+import org.elasticsearch.index.query.QueryShardException;\n import org.elasticsearch.index.shard.IndexShardRecoveringException;\n import org.elasticsearch.index.shard.ShardId;\n+import org.elasticsearch.rest.RestStatus;\n+import org.elasticsearch.search.SearchParseException;\n+import org.elasticsearch.search.SearchShardTarget;\n import org.elasticsearch.test.ESTestCase;\n+import org.elasticsearch.test.TestSearchContext;\n+import org.elasticsearch.transport.RemoteTransportException;\n import org.hamcrest.Matcher;\n \n+import java.io.EOFException;\n+import java.io.FileNotFoundException;\n import java.io.IOException;\n import java.util.Collections;\n \n import static java.util.Collections.singleton;\n-import static org.hamcrest.CoreMatchers.equalTo;\n+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;\n import static org.hamcrest.CoreMatchers.hasItem;\n-import static org.hamcrest.CoreMatchers.startsWith;\n+import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.hasSize;\n+import static org.hamcrest.Matchers.startsWith;\n \n public class ElasticsearchExceptionTests extends ESTestCase {\n \n+ public void testStatus() {\n+ ElasticsearchException exception = new ElasticsearchException(\"test\");\n+ assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));\n+\n+ exception = new ElasticsearchException(\"test\", new RuntimeException());\n+ assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));\n+\n+ exception = new ElasticsearchException(\"test\", new ResourceNotFoundException(\"test\"));\n+ assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));\n+\n+ exception = new RemoteTransportException(\"test\", new ResourceNotFoundException(\"test\"));\n+ assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND));\n+\n+ exception = new RemoteTransportException(\"test\", new ResourceAlreadyExistsException(\"test\"));\n+ assertThat(exception.status(), 
equalTo(RestStatus.BAD_REQUEST));\n+\n+ exception = new RemoteTransportException(\"test\", new IllegalArgumentException(\"foobar\"));\n+ assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST));\n+\n+ exception = new RemoteTransportException(\"test\", new IllegalStateException(\"foobar\"));\n+ assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));\n+ }\n+\n+ public void testGuessRootCause() {\n+ {\n+ ElasticsearchException exception = new ElasticsearchException(\"foo\", new ElasticsearchException(\"bar\",\n+ new IndexNotFoundException(\"foo\", new RuntimeException(\"foobar\"))));\n+ ElasticsearchException[] rootCauses = exception.guessRootCauses();\n+ assertEquals(rootCauses.length, 1);\n+ assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), \"index_not_found_exception\");\n+ assertEquals(rootCauses[0].getMessage(), \"no such index\");\n+ ShardSearchFailure failure = new ShardSearchFailure(new ParsingException(1, 2, \"foobar\", null),\n+ new SearchShardTarget(\"node_1\", new Index(\"foo\", \"_na_\"), 1));\n+ ShardSearchFailure failure1 = new ShardSearchFailure(new ParsingException(1, 2, \"foobar\", null),\n+ new SearchShardTarget(\"node_1\", new Index(\"foo\", \"_na_\"), 2));\n+ SearchPhaseExecutionException ex = new SearchPhaseExecutionException(\"search\", \"all shards failed\",\n+ new ShardSearchFailure[]{failure, failure1});\n+ if (randomBoolean()) {\n+ rootCauses = (randomBoolean() ? new RemoteTransportException(\"remoteboom\", ex) : ex).guessRootCauses();\n+ } else {\n+ rootCauses = ElasticsearchException.guessRootCauses(randomBoolean() ? new RemoteTransportException(\"remoteboom\", ex) : ex);\n+ }\n+ assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), \"parsing_exception\");\n+ assertEquals(rootCauses[0].getMessage(), \"foobar\");\n+\n+ ElasticsearchException oneLevel = new ElasticsearchException(\"foo\", new RuntimeException(\"foobar\"));\n+ rootCauses = oneLevel.guessRootCauses();\n+ assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), \"exception\");\n+ assertEquals(rootCauses[0].getMessage(), \"foo\");\n+ }\n+ {\n+ ShardSearchFailure failure = new ShardSearchFailure(\n+ new ParsingException(1, 2, \"foobar\", null),\n+ new SearchShardTarget(\"node_1\", new Index(\"foo\", \"_na_\"), 1));\n+ ShardSearchFailure failure1 = new ShardSearchFailure(new QueryShardException(new Index(\"foo1\", \"_na_\"), \"foobar\", null),\n+ new SearchShardTarget(\"node_1\", new Index(\"foo1\", \"_na_\"), 1));\n+ ShardSearchFailure failure2 = new ShardSearchFailure(new QueryShardException(new Index(\"foo1\", \"_na_\"), \"foobar\", null),\n+ new SearchShardTarget(\"node_1\", new Index(\"foo1\", \"_na_\"), 2));\n+ SearchPhaseExecutionException ex = new SearchPhaseExecutionException(\"search\", \"all shards failed\",\n+ new ShardSearchFailure[]{failure, failure1, failure2});\n+ final ElasticsearchException[] rootCauses = ex.guessRootCauses();\n+ assertEquals(rootCauses.length, 2);\n+ assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), \"parsing_exception\");\n+ assertEquals(rootCauses[0].getMessage(), \"foobar\");\n+ assertEquals(((ParsingException) rootCauses[0]).getLineNumber(), 1);\n+ assertEquals(((ParsingException) rootCauses[0]).getColumnNumber(), 2);\n+ assertEquals(ElasticsearchException.getExceptionName(rootCauses[1]), \"query_shard_exception\");\n+ assertEquals((rootCauses[1]).getIndex().getName(), \"foo1\");\n+ assertEquals(rootCauses[1].getMessage(), \"foobar\");\n+ }\n+\n+ {\n+ final 
ElasticsearchException[] foobars = ElasticsearchException.guessRootCauses(new IllegalArgumentException(\"foobar\"));\n+ assertEquals(foobars.length, 1);\n+ assertTrue(foobars[0] instanceof ElasticsearchException);\n+ assertEquals(foobars[0].getMessage(), \"foobar\");\n+ assertEquals(foobars[0].getCause().getClass(), IllegalArgumentException.class);\n+ assertEquals(foobars[0].getExceptionName(), \"illegal_argument_exception\");\n+ }\n+ }\n+\n+ public void testDeduplicate() throws IOException {\n+ {\n+ ShardSearchFailure failure = new ShardSearchFailure(new ParsingException(1, 2, \"foobar\", null),\n+ new SearchShardTarget(\"node_1\", new Index(\"foo\", \"_na_\"), 1));\n+ ShardSearchFailure failure1 = new ShardSearchFailure(new ParsingException(1, 2, \"foobar\", null),\n+ new SearchShardTarget(\"node_1\", new Index(\"foo\", \"_na_\"), 2));\n+ SearchPhaseExecutionException ex = new SearchPhaseExecutionException(\"search\", \"all shards failed\",\n+ randomBoolean() ? failure1.getCause() : failure.getCause(), new ShardSearchFailure[]{failure, failure1});\n+ XContentBuilder builder = XContentFactory.jsonBuilder();\n+ builder.startObject();\n+ ex.toXContent(builder, ToXContent.EMPTY_PARAMS);\n+ builder.endObject();\n+ String expected = \"{\\\"type\\\":\\\"search_phase_execution_exception\\\",\\\"reason\\\":\\\"all shards failed\\\",\\\"phase\\\":\\\"search\\\",\" +\n+ \"\\\"grouped\\\":true,\\\"failed_shards\\\":[{\\\"shard\\\":1,\\\"index\\\":\\\"foo\\\",\\\"node\\\":\\\"node_1\\\",\\\"reason\\\":\" +\n+ \"{\\\"type\\\":\\\"parsing_exception\\\",\\\"reason\\\":\\\"foobar\\\",\\\"line\\\":1,\\\"col\\\":2}}]}\";\n+ assertEquals(expected, builder.string());\n+ }\n+ {\n+ ShardSearchFailure failure = new ShardSearchFailure(new ParsingException(1, 2, \"foobar\", null),\n+ new SearchShardTarget(\"node_1\", new Index(\"foo\", \"_na_\"), 1));\n+ ShardSearchFailure failure1 = new ShardSearchFailure(new QueryShardException(new Index(\"foo1\", \"_na_\"), \"foobar\", null),\n+ new SearchShardTarget(\"node_1\", new Index(\"foo1\", \"_na_\"), 1));\n+ ShardSearchFailure failure2 = new ShardSearchFailure(new QueryShardException(new Index(\"foo1\", \"_na_\"), \"foobar\", null),\n+ new SearchShardTarget(\"node_1\", new Index(\"foo1\", \"_na_\"), 2));\n+ SearchPhaseExecutionException ex = new SearchPhaseExecutionException(\"search\", \"all shards failed\",\n+ new ShardSearchFailure[]{failure, failure1, failure2});\n+ XContentBuilder builder = XContentFactory.jsonBuilder();\n+ builder.startObject();\n+ ex.toXContent(builder, ToXContent.EMPTY_PARAMS);\n+ builder.endObject();\n+ String expected = \"{\\\"type\\\":\\\"search_phase_execution_exception\\\",\\\"reason\\\":\\\"all shards failed\\\",\" +\n+ \"\\\"phase\\\":\\\"search\\\",\\\"grouped\\\":true,\\\"failed_shards\\\":[{\\\"shard\\\":1,\\\"index\\\":\\\"foo\\\",\\\"node\\\":\\\"node_1\\\",\" +\n+ \"\\\"reason\\\":{\\\"type\\\":\\\"parsing_exception\\\",\\\"reason\\\":\\\"foobar\\\",\\\"line\\\":1,\\\"col\\\":2}},{\\\"shard\\\":1,\" +\n+ \"\\\"index\\\":\\\"foo1\\\",\\\"node\\\":\\\"node_1\\\",\\\"reason\\\":{\\\"type\\\":\\\"query_shard_exception\\\",\\\"reason\\\":\\\"foobar\\\",\" +\n+ \"\\\"index_uuid\\\":\\\"_na_\\\",\\\"index\\\":\\\"foo1\\\"}}]}\";\n+ assertEquals(expected, builder.string());\n+ }\n+ {\n+ ShardSearchFailure failure = new ShardSearchFailure(new ParsingException(1, 2, \"foobar\", null),\n+ new SearchShardTarget(\"node_1\", new Index(\"foo\", \"_na_\"), 1));\n+ ShardSearchFailure failure1 = new ShardSearchFailure(new ParsingException(1, 2, 
\"foobar\", null),\n+ new SearchShardTarget(\"node_1\", new Index(\"foo\", \"_na_\"), 2));\n+ NullPointerException nullPointerException = new NullPointerException();\n+ SearchPhaseExecutionException ex = new SearchPhaseExecutionException(\"search\", \"all shards failed\", nullPointerException,\n+ new ShardSearchFailure[]{failure, failure1});\n+ assertEquals(nullPointerException, ex.getCause());\n+ XContentBuilder builder = XContentFactory.jsonBuilder();\n+ builder.startObject();\n+ ex.toXContent(builder, ToXContent.EMPTY_PARAMS);\n+ builder.endObject();\n+ String expected = \"{\\\"type\\\":\\\"search_phase_execution_exception\\\",\\\"reason\\\":\\\"all shards failed\\\",\" +\n+ \"\\\"phase\\\":\\\"search\\\",\\\"grouped\\\":true,\\\"failed_shards\\\":[{\\\"shard\\\":1,\\\"index\\\":\\\"foo\\\",\\\"node\\\":\\\"node_1\\\",\" +\n+ \"\\\"reason\\\":{\\\"type\\\":\\\"parsing_exception\\\",\\\"reason\\\":\\\"foobar\\\",\\\"line\\\":1,\\\"col\\\":2}}],\" +\n+ \"\\\"caused_by\\\":{\\\"type\\\":\\\"null_pointer_exception\\\",\\\"reason\\\":null}}\";\n+ assertEquals(expected, builder.string());\n+ }\n+ }\n+\n+ /**\n+ * Check whether this exception contains an exception of the given type:\n+ * either it is of the given class itself or it contains a nested cause\n+ * of the given type.\n+ *\n+ * @param exType the exception type to look for\n+ * @return whether there is a nested exception of the specified type\n+ */\n+ private static boolean contains(Throwable t, Class<? extends Throwable> exType) {\n+ if (exType == null) {\n+ return false;\n+ }\n+ for (Throwable cause = t; t != null; t = t.getCause()) {\n+ if (exType.isInstance(cause)) {\n+ return true;\n+ }\n+ }\n+ return false;\n+ }\n+\n+ public void testGetRootCause() {\n+ Exception root = new RuntimeException(\"foobar\");\n+ ElasticsearchException exception = new ElasticsearchException(\"foo\", new ElasticsearchException(\"bar\",\n+ new IllegalArgumentException(\"index is closed\", root)));\n+ assertEquals(root, exception.getRootCause());\n+ assertTrue(contains(exception, RuntimeException.class));\n+ assertFalse(contains(exception, EOFException.class));\n+ }\n+\n+ public void testToString() {\n+ ElasticsearchException exception = new ElasticsearchException(\"foo\", new ElasticsearchException(\"bar\",\n+ new IllegalArgumentException(\"index is closed\", new RuntimeException(\"foobar\"))));\n+ assertEquals(\"ElasticsearchException[foo]; nested: ElasticsearchException[bar]; nested: IllegalArgumentException\" +\n+ \"[index is closed]; nested: RuntimeException[foobar];\", exception.toString());\n+ }\n+\n public void testToXContent() throws IOException {\n- ElasticsearchException e = new ElasticsearchException(\"test\");\n- assertExceptionAsJson(e, false, equalTo(\"{\\\"type\\\":\\\"exception\\\",\\\"reason\\\":\\\"test\\\"}\"));\n-\n- e = new IndexShardRecoveringException(new ShardId(\"_test\", \"_0\", 5));\n- assertExceptionAsJson(e, false, equalTo(\"{\\\"type\\\":\\\"index_shard_recovering_exception\\\",\" +\n- \"\\\"reason\\\":\\\"CurrentState[RECOVERING] Already recovering\\\",\\\"index_uuid\\\":\\\"_0\\\",\\\"shard\\\":\\\"5\\\",\\\"index\\\":\\\"_test\\\"}\"));\n-\n- e = new BroadcastShardOperationFailedException(new ShardId(\"_index\", \"_uuid\", 12), \"foo\", new IllegalStateException(\"bar\"));\n- assertExceptionAsJson(e, false, equalTo(\"{\\\"type\\\":\\\"illegal_state_exception\\\",\\\"reason\\\":\\\"bar\\\"}\"));\n-\n- e = new ElasticsearchException(new IllegalArgumentException(\"foo\"));\n- assertExceptionAsJson(e, false, 
equalTo(\"{\\\"type\\\":\\\"exception\\\",\\\"reason\\\":\\\"java.lang.IllegalArgumentException: foo\\\",\" +\n- \"\\\"caused_by\\\":{\\\"type\\\":\\\"illegal_argument_exception\\\",\\\"reason\\\":\\\"foo\\\"}}\"));\n-\n- e = new ElasticsearchException(\"foo\", new IllegalStateException(\"bar\"));\n- assertExceptionAsJson(e, false, equalTo(\"{\\\"type\\\":\\\"exception\\\",\\\"reason\\\":\\\"foo\\\",\" +\n- \"\\\"caused_by\\\":{\\\"type\\\":\\\"illegal_state_exception\\\",\\\"reason\\\":\\\"bar\\\"}}\"));\n-\n- // Test the same exception but with the \"rest.exception.stacktrace.skip\" parameter disabled: the stack_trace must be present\n- // in the JSON. Since the stack can be large, it only checks the beginning of the JSON.\n- assertExceptionAsJson(e, true, startsWith(\"{\\\"type\\\":\\\"exception\\\",\\\"reason\\\":\\\"foo\\\",\" +\n- \"\\\"caused_by\\\":{\\\"type\\\":\\\"illegal_state_exception\\\",\\\"reason\\\":\\\"bar\\\",\" +\n- \"\\\"stack_trace\\\":\\\"java.lang.IllegalStateException: bar\" +\n- (Constants.WINDOWS ? \"\\\\r\\\\n\" : \"\\\\n\") +\n- \"\\\\tat org.elasticsearch.\"));\n+ {\n+ ElasticsearchException e = new ElasticsearchException(\"test\");\n+ assertExceptionAsJson(e, \"{\\\"type\\\":\\\"exception\\\",\\\"reason\\\":\\\"test\\\"}\");\n+ }\n+ {\n+ ElasticsearchException e = new IndexShardRecoveringException(new ShardId(\"_test\", \"_0\", 5));\n+ assertExceptionAsJson(e, \"{\\\"type\\\":\\\"index_shard_recovering_exception\\\",\" +\n+ \"\\\"reason\\\":\\\"CurrentState[RECOVERING] Already recovering\\\",\\\"index_uuid\\\":\\\"_0\\\",\" +\n+ \"\\\"shard\\\":\\\"5\\\",\\\"index\\\":\\\"_test\\\"}\");\n+ }\n+ {\n+ ElasticsearchException e = new BroadcastShardOperationFailedException(new ShardId(\"_index\", \"_uuid\", 12), \"foo\",\n+ new IllegalStateException(\"bar\"));\n+ assertExceptionAsJson(e, \"{\\\"type\\\":\\\"illegal_state_exception\\\",\\\"reason\\\":\\\"bar\\\"}\");\n+ }\n+ {\n+ ElasticsearchException e = new ElasticsearchException(new IllegalArgumentException(\"foo\"));\n+ assertExceptionAsJson(e, \"{\\\"type\\\":\\\"exception\\\",\\\"reason\\\":\\\"java.lang.IllegalArgumentException: foo\\\",\" +\n+ \"\\\"caused_by\\\":{\\\"type\\\":\\\"illegal_argument_exception\\\",\\\"reason\\\":\\\"foo\\\"}}\");\n+ }\n+ {\n+ ElasticsearchException e = new SearchParseException(new TestSearchContext(null), \"foo\", new XContentLocation(1,0));\n+ assertExceptionAsJson(e, \"{\\\"type\\\":\\\"search_parse_exception\\\",\\\"reason\\\":\\\"foo\\\",\\\"line\\\":1,\\\"col\\\":0}\");\n+ }\n+ {\n+ ElasticsearchException ex = new ElasticsearchException(\"foo\",\n+ new ElasticsearchException(\"bar\", new IllegalArgumentException(\"index is closed\", new RuntimeException(\"foobar\"))));\n+ assertExceptionAsJson(ex, \"{\\\"type\\\":\\\"exception\\\",\\\"reason\\\":\\\"foo\\\",\\\"caused_by\\\":{\\\"type\\\":\\\"exception\\\",\" +\n+ \"\\\"reason\\\":\\\"bar\\\",\\\"caused_by\\\":{\\\"type\\\":\\\"illegal_argument_exception\\\",\\\"reason\\\":\\\"index is closed\\\",\" +\n+ \"\\\"caused_by\\\":{\\\"type\\\":\\\"runtime_exception\\\",\\\"reason\\\":\\\"foobar\\\"}}}}\");\n+ }\n+ {\n+ ElasticsearchException e = new ElasticsearchException(\"foo\", new IllegalStateException(\"bar\"));\n+ assertExceptionAsJson(e, \"{\\\"type\\\":\\\"exception\\\",\\\"reason\\\":\\\"foo\\\",\" +\n+ \"\\\"caused_by\\\":{\\\"type\\\":\\\"illegal_state_exception\\\",\\\"reason\\\":\\\"bar\\\"}}\");\n+\n+ // Test the same exception but with the \"rest.exception.stacktrace.skip\" parameter disabled: the stack_trace 
must be present\n+ // in the JSON. Since the stack can be large, it only checks the beginning of the JSON.\n+ ToXContent.Params params = new ToXContent.MapParams(\n+ Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, \"false\"));\n+ String actual;\n+ try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) {\n+ builder.startObject();\n+ e.toXContent(builder, params);\n+ builder.endObject();\n+ actual = builder.string();\n+ }\n+ assertThat(actual, startsWith(\"{\\\"type\\\":\\\"exception\\\",\\\"reason\\\":\\\"foo\\\",\" +\n+ \"\\\"caused_by\\\":{\\\"type\\\":\\\"illegal_state_exception\\\",\\\"reason\\\":\\\"bar\\\",\" +\n+ \"\\\"stack_trace\\\":\\\"java.lang.IllegalStateException: bar\" +\n+ (Constants.WINDOWS ? \"\\\\r\\\\n\" : \"\\\\n\") +\n+ \"\\\\tat org.elasticsearch.\"));\n+ }\n }\n \n- public void testToXContentWithHeaders() throws IOException {\n+ public void testGenerateThrowableToXContent() throws IOException {\n+ {\n+ Exception ex;\n+ if (randomBoolean()) {\n+ // just a wrapper which is omitted\n+ ex = new RemoteTransportException(\"foobar\", new FileNotFoundException(\"foo not found\"));\n+ } else {\n+ ex = new FileNotFoundException(\"foo not found\");\n+ }\n+ assertExceptionAsJson(ex, \"{\\\"type\\\":\\\"file_not_found_exception\\\",\\\"reason\\\":\\\"foo not found\\\"}\");\n+ }\n+ {\n+ ParsingException ex = new ParsingException(1, 2, \"foobar\", null);\n+ assertExceptionAsJson(ex, \"{\\\"type\\\":\\\"parsing_exception\\\",\\\"reason\\\":\\\"foobar\\\",\\\"line\\\":1,\\\"col\\\":2}\");\n+ }\n+\n+ { // test equivalence\n+ ElasticsearchException ex = new RemoteTransportException(\"foobar\", new FileNotFoundException(\"foo not found\"));\n+ String toXContentString = Strings.toString(ex);\n+ String throwableString = Strings.toString((builder, params) -> {\n+ ElasticsearchException.generateThrowableXContent(builder, params, ex);\n+ return builder;\n+ });\n+\n+ assertEquals(throwableString, toXContentString);\n+ assertEquals(\"{\\\"type\\\":\\\"file_not_found_exception\\\",\\\"reason\\\":\\\"foo not found\\\"}\", toXContentString);\n+ }\n+\n+ { // render header and metadata\n+ ParsingException ex = new ParsingException(1, 2, \"foobar\", null);\n+ ex.addMetadata(\"es.test1\", \"value1\");\n+ ex.addMetadata(\"es.test2\", \"value2\");\n+ ex.addHeader(\"test\", \"some value\");\n+ ex.addHeader(\"test_multi\", \"some value\", \"another value\");\n+ String expected = \"{\\\"type\\\":\\\"parsing_exception\\\",\\\"reason\\\":\\\"foobar\\\",\\\"line\\\":1,\\\"col\\\":2,\" +\n+ \"\\\"test1\\\":\\\"value1\\\",\\\"test2\\\":\\\"value2\\\",\" +\n+ \"\\\"header\\\":{\\\"test_multi\\\":\" +\n+ \"[\\\"some value\\\",\\\"another value\\\"],\\\"test\\\":\\\"some value\\\"}}\";\n+ assertExceptionAsJson(ex, expected);\n+ }\n+ }\n+\n+ public void testToXContentWithHeadersAndMetadata() throws IOException {\n ElasticsearchException e = new ElasticsearchException(\"foo\",\n new ElasticsearchException(\"bar\",\n new ElasticsearchException(\"baz\",\n new ClusterBlockException(singleton(DiscoverySettings.NO_MASTER_BLOCK_WRITES)))));\n e.addHeader(\"foo_0\", \"0\");\n e.addHeader(\"foo_1\", \"1\");\n- e.addHeader(\"es.header_foo_0\", \"foo_0\");\n- e.addHeader(\"es.header_foo_1\", \"foo_1\");\n+ e.addMetadata(\"es.metadata_foo_0\", \"foo_0\");\n+ e.addMetadata(\"es.metadata_foo_1\", \"foo_1\");\n \n final String expectedJson = \"{\"\n + \"\\\"type\\\":\\\"exception\\\",\"\n + \"\\\"reason\\\":\\\"foo\\\",\"\n- + 
\"\\\"header_foo_0\\\":\\\"foo_0\\\",\"\n- + \"\\\"header_foo_1\\\":\\\"foo_1\\\",\"\n+ + \"\\\"metadata_foo_0\\\":\\\"foo_0\\\",\"\n+ + \"\\\"metadata_foo_1\\\":\\\"foo_1\\\",\"\n + \"\\\"caused_by\\\":{\"\n + \"\\\"type\\\":\\\"exception\\\",\"\n + \"\\\"reason\\\":\\\"bar\\\",\"\n@@ -106,7 +371,7 @@ public void testToXContentWithHeaders() throws IOException {\n + \"}\"\n + \"}\";\n \n- assertExceptionAsJson(e, false, equalTo(expectedJson));\n+ assertExceptionAsJson(e, expectedJson);\n \n ElasticsearchException parsed;\n try (XContentParser parser = createParser(XContentType.JSON.xContent(), expectedJson)) {\n@@ -118,11 +383,12 @@ public void testToXContentWithHeaders() throws IOException {\n \n assertNotNull(parsed);\n assertEquals(parsed.getMessage(), \"Elasticsearch exception [type=exception, reason=foo]\");\n- assertThat(parsed.getHeaderKeys(), hasSize(4));\n- assertEquals(parsed.getHeader(\"header_foo_0\").get(0), \"foo_0\");\n- assertEquals(parsed.getHeader(\"header_foo_1\").get(0), \"foo_1\");\n+ assertThat(parsed.getHeaderKeys(), hasSize(2));\n assertEquals(parsed.getHeader(\"foo_0\").get(0), \"0\");\n assertEquals(parsed.getHeader(\"foo_1\").get(0), \"1\");\n+ assertThat(parsed.getMetadataKeys(), hasSize(2));\n+ assertEquals(parsed.getMetadata(\"es.metadata_foo_0\").get(0), \"foo_0\");\n+ assertEquals(parsed.getMetadata(\"es.metadata_foo_1\").get(0), \"foo_1\");\n \n ElasticsearchException cause = (ElasticsearchException) parsed.getCause();\n assertEquals(cause.getMessage(), \"Elasticsearch exception [type=exception, reason=bar]\");\n@@ -185,24 +451,25 @@ public void testFromXContentWithCause() throws IOException {\n cause = (ElasticsearchException) cause.getCause();\n assertEquals(cause.getMessage(),\n \"Elasticsearch exception [type=routing_missing_exception, reason=routing is required for [_test]/[_type]/[_id]]\");\n- assertThat(cause.getHeaderKeys(), hasSize(2));\n- assertThat(cause.getHeader(\"index\"), hasItem(\"_test\"));\n- assertThat(cause.getHeader(\"index_uuid\"), hasItem(\"_na_\"));\n+ assertThat(cause.getHeaderKeys(), hasSize(0));\n+ assertThat(cause.getMetadataKeys(), hasSize(2));\n+ assertThat(cause.getMetadata(\"es.index\"), hasItem(\"_test\"));\n+ assertThat(cause.getMetadata(\"es.index_uuid\"), hasItem(\"_na_\"));\n }\n \n- public void testFromXContentWithHeaders() throws IOException {\n+ public void testFromXContentWithHeadersAndMetadata() throws IOException {\n RoutingMissingException routing = new RoutingMissingException(\"_test\", \"_type\", \"_id\");\n ElasticsearchException baz = new ElasticsearchException(\"baz\", routing);\n baz.addHeader(\"baz_0\", \"baz0\");\n- baz.addHeader(\"es.baz_1\", \"baz1\");\n+ baz.addMetadata(\"es.baz_1\", \"baz1\");\n baz.addHeader(\"baz_2\", \"baz2\");\n- baz.addHeader(\"es.baz_3\", \"baz3\");\n+ baz.addMetadata(\"es.baz_3\", \"baz3\");\n ElasticsearchException bar = new ElasticsearchException(\"bar\", baz);\n- bar.addHeader(\"es.bar_0\", \"bar0\");\n+ bar.addMetadata(\"es.bar_0\", \"bar0\");\n bar.addHeader(\"bar_1\", \"bar1\");\n- bar.addHeader(\"es.bar_2\", \"bar2\");\n+ bar.addMetadata(\"es.bar_2\", \"bar2\");\n ElasticsearchException foo = new ElasticsearchException(\"foo\", bar);\n- foo.addHeader(\"es.foo_0\", \"foo0\");\n+ foo.addMetadata(\"es.foo_0\", \"foo0\");\n foo.addHeader(\"foo_1\", \"foo1\");\n \n final XContent xContent = randomFrom(XContentType.values()).xContent();\n@@ -218,31 +485,35 @@ public void testFromXContentWithHeaders() throws IOException {\n \n assertNotNull(parsed);\n 
assertEquals(parsed.getMessage(), \"Elasticsearch exception [type=exception, reason=foo]\");\n- assertThat(parsed.getHeaderKeys(), hasSize(2));\n- assertThat(parsed.getHeader(\"foo_0\"), hasItem(\"foo0\"));\n+ assertThat(parsed.getHeaderKeys(), hasSize(1));\n assertThat(parsed.getHeader(\"foo_1\"), hasItem(\"foo1\"));\n+ assertThat(parsed.getMetadataKeys(), hasSize(1));\n+ assertThat(parsed.getMetadata(\"es.foo_0\"), hasItem(\"foo0\"));\n \n ElasticsearchException cause = (ElasticsearchException) parsed.getCause();\n assertEquals(cause.getMessage(), \"Elasticsearch exception [type=exception, reason=bar]\");\n- assertThat(cause.getHeaderKeys(), hasSize(3));\n- assertThat(cause.getHeader(\"bar_0\"), hasItem(\"bar0\"));\n+ assertThat(cause.getHeaderKeys(), hasSize(1));\n assertThat(cause.getHeader(\"bar_1\"), hasItem(\"bar1\"));\n- assertThat(cause.getHeader(\"bar_2\"), hasItem(\"bar2\"));\n+ assertThat(cause.getMetadataKeys(), hasSize(2));\n+ assertThat(cause.getMetadata(\"es.bar_0\"), hasItem(\"bar0\"));\n+ assertThat(cause.getMetadata(\"es.bar_2\"), hasItem(\"bar2\"));\n \n cause = (ElasticsearchException) cause.getCause();\n assertEquals(cause.getMessage(), \"Elasticsearch exception [type=exception, reason=baz]\");\n- assertThat(cause.getHeaderKeys(), hasSize(4));\n+ assertThat(cause.getHeaderKeys(), hasSize(2));\n assertThat(cause.getHeader(\"baz_0\"), hasItem(\"baz0\"));\n- assertThat(cause.getHeader(\"baz_1\"), hasItem(\"baz1\"));\n assertThat(cause.getHeader(\"baz_2\"), hasItem(\"baz2\"));\n- assertThat(cause.getHeader(\"baz_3\"), hasItem(\"baz3\"));\n+ assertThat(cause.getMetadataKeys(), hasSize(2));\n+ assertThat(cause.getMetadata(\"es.baz_1\"), hasItem(\"baz1\"));\n+ assertThat(cause.getMetadata(\"es.baz_3\"), hasItem(\"baz3\"));\n \n cause = (ElasticsearchException) cause.getCause();\n assertEquals(cause.getMessage(),\n \"Elasticsearch exception [type=routing_missing_exception, reason=routing is required for [_test]/[_type]/[_id]]\");\n- assertThat(cause.getHeaderKeys(), hasSize(2));\n- assertThat(cause.getHeader(\"index\"), hasItem(\"_test\"));\n- assertThat(cause.getHeader(\"index_uuid\"), hasItem(\"_na_\"));\n+ assertThat(cause.getHeaderKeys(), hasSize(0));\n+ assertThat(cause.getMetadataKeys(), hasSize(2));\n+ assertThat(cause.getMetadata(\"es.index\"), hasItem(\"_test\"));\n+ assertThat(cause.getMetadata(\"es.index_uuid\"), hasItem(\"_na_\"));\n }\n \n /**\n@@ -251,17 +522,15 @@ public void testFromXContentWithHeaders() throws IOException {\n * By default, the stack trace of the exception is not rendered. 
The parameter `errorTrace` forces the stack trace to\n * be rendered like the REST API does when the \"error_trace\" parameter is set to true.\n */\n- private static void assertExceptionAsJson(ElasticsearchException e, boolean errorTrace, Matcher<String> expected)\n- throws IOException {\n- ToXContent.Params params = ToXContent.EMPTY_PARAMS;\n- if (errorTrace) {\n- params = new ToXContent.MapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, \"false\"));\n- }\n- try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) {\n- builder.startObject();\n- e.toXContent(builder, params);\n- builder.endObject();\n- assertThat(builder.bytes().utf8ToString(), expected);\n- }\n+ private static void assertToXContentAsJson(ToXContent e, String expectedJson) throws IOException {\n+ BytesReference actual = XContentHelper.toXContent(e, XContentType.JSON, randomBoolean());\n+ assertToXContentEquivalent(new BytesArray(expectedJson), actual, XContentType.JSON);\n+ }\n+\n+ private static void assertExceptionAsJson(Exception e, String expectedJson) throws IOException {\n+ assertToXContentAsJson((builder, params) -> {\n+ ElasticsearchException.generateThrowableXContent(builder, params, e);\n+ return builder;\n+ }, expectedJson);\n }\n }", "filename": "core/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java", "status": "modified" }, { "diff": "@@ -18,6 +18,11 @@\n */\n package org.elasticsearch;\n \n+import org.apache.lucene.index.CorruptIndexException;\n+import org.apache.lucene.index.IndexFormatTooNewException;\n+import org.apache.lucene.index.IndexFormatTooOldException;\n+import org.apache.lucene.store.AlreadyClosedException;\n+import org.apache.lucene.store.LockObtainFailedException;\n import org.elasticsearch.action.FailedNodeException;\n import org.elasticsearch.action.RoutingMissingException;\n import org.elasticsearch.action.TimestampParsingException;\n@@ -33,8 +38,11 @@\n import org.elasticsearch.cluster.routing.ShardRoutingState;\n import org.elasticsearch.cluster.routing.TestShardRouting;\n import org.elasticsearch.common.ParsingException;\n+import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.UUIDs;\n import org.elasticsearch.common.breaker.CircuitBreakingException;\n+import org.elasticsearch.common.bytes.BytesArray;\n+import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.io.PathUtils;\n import org.elasticsearch.common.io.stream.BytesStreamOutput;\n import org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper;\n@@ -44,9 +52,6 @@\n import org.elasticsearch.common.unit.ByteSizeValue;\n import org.elasticsearch.common.util.CancellableThreadsTests;\n import org.elasticsearch.common.util.set.Sets;\n-import org.elasticsearch.common.xcontent.ToXContent;\n-import org.elasticsearch.common.xcontent.XContentBuilder;\n-import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.common.xcontent.XContentLocation;\n import org.elasticsearch.discovery.DiscoverySettings;\n import org.elasticsearch.env.ShardLockObtainFailedException;\n@@ -81,6 +86,8 @@\n import org.elasticsearch.transport.ConnectTransportException;\n import org.elasticsearch.transport.TcpTransport;\n \n+import java.io.EOFException;\n+import java.io.FileNotFoundException;\n import java.io.IOException;\n import java.net.URISyntaxException;\n import java.nio.file.AccessDeniedException;\n@@ -97,6 +104,7 @@\n import java.nio.file.Path;\n import 
java.nio.file.attribute.BasicFileAttributes;\n import java.util.Arrays;\n+import java.util.Base64;\n import java.util.HashMap;\n import java.util.HashSet;\n import java.util.Map;\n@@ -107,6 +115,7 @@\n import static java.util.Collections.emptyMap;\n import static java.util.Collections.emptySet;\n import static java.util.Collections.singleton;\n+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertVersionSerializable;\n import static org.hamcrest.Matchers.greaterThanOrEqualTo;\n import static org.hamcrest.Matchers.instanceOf;\n \n@@ -511,30 +520,16 @@ public void testClusterBlockException() throws IOException {\n assertEquals(1, ex.blocks().size());\n }\n \n- private String toXContent(ToXContent x) {\n- try {\n- XContentBuilder builder = XContentFactory.jsonBuilder();\n- builder.startObject();\n- x.toXContent(builder, ToXContent.EMPTY_PARAMS);\n- builder.endObject();\n- return builder.string();\n- } catch (IOException e) {\n- return \"{ \\\"error\\\" : \\\"\" + e.getMessage() + \"\\\"}\";\n- }\n- }\n-\n public void testNotSerializableExceptionWrapper() throws IOException {\n NotSerializableExceptionWrapper ex = serialize(new NotSerializableExceptionWrapper(new NullPointerException()));\n- assertEquals(\"{\\\"type\\\":\\\"null_pointer_exception\\\",\\\"reason\\\":\\\"null_pointer_exception: null\\\"}\", toXContent(ex));\n+ assertEquals(\"{\\\"type\\\":\\\"null_pointer_exception\\\",\\\"reason\\\":\\\"null_pointer_exception: null\\\"}\", Strings.toString(ex));\n ex = serialize(new NotSerializableExceptionWrapper(new IllegalArgumentException(\"nono!\")));\n- assertEquals(\"{\\\"type\\\":\\\"illegal_argument_exception\\\",\\\"reason\\\":\\\"illegal_argument_exception: nono!\\\"}\", toXContent(ex));\n+ assertEquals(\"{\\\"type\\\":\\\"illegal_argument_exception\\\",\\\"reason\\\":\\\"illegal_argument_exception: nono!\\\"}\", Strings.toString(ex));\n \n class UnknownException extends Exception {\n-\n- public UnknownException(final String message) {\n+ UnknownException(final String message) {\n super(message);\n }\n-\n }\n \n Exception[] unknowns = new Exception[]{\n@@ -559,28 +554,94 @@ public UnknownException(final String message) {\n }\n }\n \n+ public void testUnknownException() throws IOException {\n+ ParsingException parsingException = new ParsingException(1, 2, \"foobar\", null);\n+ final Exception ex = new UnknownException(\"eggplant\", parsingException);\n+ Exception exception = serialize(ex);\n+ assertEquals(\"unknown_exception: eggplant\", exception.getMessage());\n+ assertTrue(exception instanceof ElasticsearchException);\n+ ParsingException e = (ParsingException)exception.getCause();\n+ assertEquals(parsingException.getIndex(), e.getIndex());\n+ assertEquals(parsingException.getMessage(), e.getMessage());\n+ assertEquals(parsingException.getLineNumber(), e.getLineNumber());\n+ assertEquals(parsingException.getColumnNumber(), e.getColumnNumber());\n+ }\n+\n+ public void testWriteThrowable() throws IOException {\n+ final QueryShardException queryShardException = new QueryShardException(new Index(\"foo\", \"_na_\"), \"foobar\", null);\n+ final UnknownException unknownException = new UnknownException(\"this exception is unknown\", queryShardException);\n+\n+ final Exception[] causes = new Exception[]{\n+ new IllegalStateException(\"foobar\"),\n+ new IllegalArgumentException(\"alalaal\"),\n+ new NullPointerException(\"boom\"),\n+ new EOFException(\"dadada\"),\n+ new ElasticsearchSecurityException(\"nono!\"),\n+ new NumberFormatException(\"not a number\"),\n+ new 
CorruptIndexException(\"baaaam booom\", \"this is my resource\"),\n+ new IndexFormatTooNewException(\"tooo new\", 1, 2, 3),\n+ new IndexFormatTooOldException(\"tooo new\", 1, 2, 3),\n+ new IndexFormatTooOldException(\"tooo new\", \"very old version\"),\n+ new ArrayIndexOutOfBoundsException(\"booom\"),\n+ new StringIndexOutOfBoundsException(\"booom\"),\n+ new FileNotFoundException(\"booom\"),\n+ new NoSuchFileException(\"booom\"),\n+ new AlreadyClosedException(\"closed!!\", new NullPointerException()),\n+ new LockObtainFailedException(\"can't lock directory\", new NullPointerException()),\n+ unknownException};\n+ for (final Exception cause : causes) {\n+ ElasticsearchException ex = new ElasticsearchException(\"topLevel\", cause);\n+ ElasticsearchException deserialized = serialize(ex);\n+ assertEquals(deserialized.getMessage(), ex.getMessage());\n+ assertTrue(\"Expected: \" + deserialized.getCause().getMessage() + \" to contain: \" +\n+ ex.getCause().getClass().getName() + \" but it didn't\",\n+ deserialized.getCause().getMessage().contains(ex.getCause().getMessage()));\n+ if (ex.getCause().getClass() != UnknownException.class) { // unknown exception is not directly mapped\n+ assertEquals(deserialized.getCause().getClass(), ex.getCause().getClass());\n+ } else {\n+ assertEquals(deserialized.getCause().getClass(), NotSerializableExceptionWrapper.class);\n+ }\n+ assertArrayEquals(deserialized.getStackTrace(), ex.getStackTrace());\n+ assertTrue(deserialized.getStackTrace().length > 1);\n+ assertVersionSerializable(VersionUtils.randomVersion(random()), cause);\n+ assertVersionSerializable(VersionUtils.randomVersion(random()), ex);\n+ assertVersionSerializable(VersionUtils.randomVersion(random()), deserialized);\n+ }\n+ }\n+\n public void testWithRestHeadersException() throws IOException {\n- ElasticsearchException ex = new ElasticsearchException(\"msg\");\n- ex.addHeader(\"foo\", \"foo\", \"bar\");\n- ex = serialize(ex);\n- assertEquals(\"msg\", ex.getMessage());\n- assertEquals(2, ex.getHeader(\"foo\").size());\n- assertEquals(\"foo\", ex.getHeader(\"foo\").get(0));\n- assertEquals(\"bar\", ex.getHeader(\"foo\").get(1));\n-\n- RestStatus status = randomFrom(RestStatus.values());\n- // ensure we are carrying over the headers even if not serialized\n- UnknownHeaderException uhe = new UnknownHeaderException(\"msg\", status);\n- uhe.addHeader(\"foo\", \"foo\", \"bar\");\n-\n- ElasticsearchException serialize = serialize((ElasticsearchException) uhe);\n- assertTrue(serialize instanceof NotSerializableExceptionWrapper);\n- NotSerializableExceptionWrapper e = (NotSerializableExceptionWrapper) serialize;\n- assertEquals(\"unknown_header_exception: msg\", e.getMessage());\n- assertEquals(2, e.getHeader(\"foo\").size());\n- assertEquals(\"foo\", e.getHeader(\"foo\").get(0));\n- assertEquals(\"bar\", e.getHeader(\"foo\").get(1));\n- assertSame(status, e.status());\n+ {\n+ ElasticsearchException ex = new ElasticsearchException(\"msg\");\n+ ex.addHeader(\"foo\", \"foo\", \"bar\");\n+ ex.addMetadata(\"es.foo_metadata\", \"value1\", \"value2\");\n+ ex = serialize(ex);\n+ assertEquals(\"msg\", ex.getMessage());\n+ assertEquals(2, ex.getHeader(\"foo\").size());\n+ assertEquals(\"foo\", ex.getHeader(\"foo\").get(0));\n+ assertEquals(\"bar\", ex.getHeader(\"foo\").get(1));\n+ assertEquals(2, ex.getMetadata(\"es.foo_metadata\").size());\n+ assertEquals(\"value1\", ex.getMetadata(\"es.foo_metadata\").get(0));\n+ assertEquals(\"value2\", ex.getMetadata(\"es.foo_metadata\").get(1));\n+ }\n+ {\n+ RestStatus status = 
randomFrom(RestStatus.values());\n+ // ensure we are carrying over the headers and metadata even if not serialized\n+ UnknownHeaderException uhe = new UnknownHeaderException(\"msg\", status);\n+ uhe.addHeader(\"foo\", \"foo\", \"bar\");\n+ uhe.addMetadata(\"es.foo_metadata\", \"value1\", \"value2\");\n+\n+ ElasticsearchException serialize = serialize((ElasticsearchException) uhe);\n+ assertTrue(serialize instanceof NotSerializableExceptionWrapper);\n+ NotSerializableExceptionWrapper e = (NotSerializableExceptionWrapper) serialize;\n+ assertEquals(\"unknown_header_exception: msg\", e.getMessage());\n+ assertEquals(2, e.getHeader(\"foo\").size());\n+ assertEquals(\"foo\", e.getHeader(\"foo\").get(0));\n+ assertEquals(\"bar\", e.getHeader(\"foo\").get(1));\n+ assertEquals(2, e.getMetadata(\"es.foo_metadata\").size());\n+ assertEquals(\"value1\", e.getMetadata(\"es.foo_metadata\").get(0));\n+ assertEquals(\"value2\", e.getMetadata(\"es.foo_metadata\").get(1));\n+ assertSame(status, e.status());\n+ }\n }\n \n public void testNoLongerPrimaryShardException() throws IOException {\n@@ -594,7 +655,7 @@ public void testNoLongerPrimaryShardException() throws IOException {\n public static class UnknownHeaderException extends ElasticsearchException {\n private final RestStatus status;\n \n- public UnknownHeaderException(String msg, RestStatus status) {\n+ UnknownHeaderException(String msg, RestStatus status) {\n super(msg);\n this.status = status;\n }\n@@ -857,5 +918,75 @@ public void testBWCShardLockObtainFailedException() throws IOException {\n assertEquals(\"shard_lock_obtain_failed_exception: [foo][1]: boom\", ex.getMessage());\n }\n \n+ public void testBWCHeadersAndMetadata() throws IOException {\n+ //this is a request serialized with headers only, no metadata as they were added in 5.3.0\n+ BytesReference decoded = new BytesArray(Base64.getDecoder().decode\n+ (\"AQ10ZXN0ICBtZXNzYWdlACYtb3JnLmVsYXN0aWNzZWFyY2guRXhjZXB0aW9uU2VyaWFsaXphdGlvblRlc3RzASBFeGNlcHRpb25TZXJpYWxpemF0aW9uVG\" +\n+ \"VzdHMuamF2YQR0ZXN03wYkc3VuLnJlZmxlY3QuTmF0aXZlTWV0aG9kQWNjZXNzb3JJbXBsAR1OYXRpdmVNZXRob2RBY2Nlc3NvckltcGwuamF2Y\" +\n+ \"QdpbnZva2Uw/v///w8kc3VuLnJlZmxlY3QuTmF0aXZlTWV0aG9kQWNjZXNzb3JJbXBsAR1OYXRpdmVNZXRob2RBY2Nlc3NvckltcGwuamF2YQZp\" +\n+ \"bnZva2U+KHN1bi5yZWZsZWN0LkRlbGVnYXRpbmdNZXRob2RBY2Nlc3NvckltcGwBIURlbGVnYXRpbmdNZXRob2RBY2Nlc3NvckltcGwuamF2YQZ\" +\n+ \"pbnZva2UrGGphdmEubGFuZy5yZWZsZWN0Lk1ldGhvZAELTWV0aG9kLmphdmEGaW52b2tl8QMzY29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkdG\" +\n+ \"VzdGluZy5SYW5kb21pemVkUnVubmVyARVSYW5kb21pemVkUnVubmVyLmphdmEGaW52b2tlsQ01Y29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkd\" +\n+ \"GVzdGluZy5SYW5kb21pemVkUnVubmVyJDgBFVJhbmRvbWl6ZWRSdW5uZXIuamF2YQhldmFsdWF0ZYsHNWNvbS5jYXJyb3RzZWFyY2gucmFuZG9t\" +\n+ \"aXplZHRlc3RpbmcuUmFuZG9taXplZFJ1bm5lciQ5ARVSYW5kb21pemVkUnVubmVyLmphdmEIZXZhbHVhdGWvBzZjb20uY2Fycm90c2VhcmNoLnJ\" +\n+ \"hbmRvbWl6ZWR0ZXN0aW5nLlJhbmRvbWl6ZWRSdW5uZXIkMTABFVJhbmRvbWl6ZWRSdW5uZXIuamF2YQhldmFsdWF0Zb0HOWNvbS5jYXJyb3RzZW\" +\n+ \"FyY2gucmFuZG9taXplZHRlc3RpbmcucnVsZXMuU3RhdGVtZW50QWRhcHRlcgEVU3RhdGVtZW50QWRhcHRlci5qYXZhCGV2YWx1YXRlJDVvcmcuY\" +\n+ \"XBhY2hlLmx1Y2VuZS51dGlsLlRlc3RSdWxlU2V0dXBUZWFyZG93bkNoYWluZWQkMQEhVGVzdFJ1bGVTZXR1cFRlYXJkb3duQ2hhaW5lZC5qYXZh\" +\n+ \"CGV2YWx1YXRlMTBvcmcuYXBhY2hlLmx1Y2VuZS51dGlsLkFic3RyYWN0QmVmb3JlQWZ0ZXJSdWxlJDEBHEFic3RyYWN0QmVmb3JlQWZ0ZXJSdWx\" +\n+ \"lLmphdmEIZXZhbHVhdGUtMm9yZy5hcGFjaGUubHVjZW5lLnV0aWwuVGVzdFJ1bGVUaHJlYWRBbmRUZXN0TmFtZSQxAR5UZXN0UnVsZVRocmVhZE\" +\n+ 
\"FuZFRlc3ROYW1lLmphdmEIZXZhbHVhdGUwN29yZy5hcGFjaGUubHVjZW5lLnV0aWwuVGVzdFJ1bGVJZ25vcmVBZnRlck1heEZhaWx1cmVzJDEBI\" +\n+ \"1Rlc3RSdWxlSWdub3JlQWZ0ZXJNYXhGYWlsdXJlcy5qYXZhCGV2YWx1YXRlQCxvcmcuYXBhY2hlLmx1Y2VuZS51dGlsLlRlc3RSdWxlTWFya0Zh\" +\n+ \"aWx1cmUkMQEYVGVzdFJ1bGVNYXJrRmFpbHVyZS5qYXZhCGV2YWx1YXRlLzljb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLnJ1bGV\" +\n+ \"zLlN0YXRlbWVudEFkYXB0ZXIBFVN0YXRlbWVudEFkYXB0ZXIuamF2YQhldmFsdWF0ZSREY29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkdGVzdG\" +\n+ \"luZy5UaHJlYWRMZWFrQ29udHJvbCRTdGF0ZW1lbnRSdW5uZXIBFlRocmVhZExlYWtDb250cm9sLmphdmEDcnVu7wI0Y29tLmNhcnJvdHNlYXJja\" +\n+ \"C5yYW5kb21pemVkdGVzdGluZy5UaHJlYWRMZWFrQ29udHJvbAEWVGhyZWFkTGVha0NvbnRyb2wuamF2YRJmb3JrVGltZW91dGluZ1Rhc2urBjZj\" +\n+ \"b20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLlRocmVhZExlYWtDb250cm9sJDMBFlRocmVhZExlYWtDb250cm9sLmphdmEIZXZhbHV\" +\n+ \"hdGXOAzNjb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLlJhbmRvbWl6ZWRSdW5uZXIBFVJhbmRvbWl6ZWRSdW5uZXIuamF2YQ1ydW\" +\n+ \"5TaW5nbGVUZXN0lAc1Y29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkdGVzdGluZy5SYW5kb21pemVkUnVubmVyJDUBFVJhbmRvbWl6ZWRSdW5uZ\" +\n+ \"XIuamF2YQhldmFsdWF0ZaIGNWNvbS5jYXJyb3RzZWFyY2gucmFuZG9taXplZHRlc3RpbmcuUmFuZG9taXplZFJ1bm5lciQ2ARVSYW5kb21pemVk\" +\n+ \"UnVubmVyLmphdmEIZXZhbHVhdGXUBjVjb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLlJhbmRvbWl6ZWRSdW5uZXIkNwEVUmFuZG9\" +\n+ \"taXplZFJ1bm5lci5qYXZhCGV2YWx1YXRl3wYwb3JnLmFwYWNoZS5sdWNlbmUudXRpbC5BYnN0cmFjdEJlZm9yZUFmdGVyUnVsZSQxARxBYnN0cm\" +\n+ \"FjdEJlZm9yZUFmdGVyUnVsZS5qYXZhCGV2YWx1YXRlLTljb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLnJ1bGVzLlN0YXRlbWVud\" +\n+ \"EFkYXB0ZXIBFVN0YXRlbWVudEFkYXB0ZXIuamF2YQhldmFsdWF0ZSQvb3JnLmFwYWNoZS5sdWNlbmUudXRpbC5UZXN0UnVsZVN0b3JlQ2xhc3NO\" +\n+ \"YW1lJDEBG1Rlc3RSdWxlU3RvcmVDbGFzc05hbWUuamF2YQhldmFsdWF0ZSlOY29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkdGVzdGluZy5ydWx\" +\n+ \"lcy5Ob1NoYWRvd2luZ09yT3ZlcnJpZGVzT25NZXRob2RzUnVsZSQxAShOb1NoYWRvd2luZ09yT3ZlcnJpZGVzT25NZXRob2RzUnVsZS5qYXZhCG\" +\n+ \"V2YWx1YXRlKE5jb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLnJ1bGVzLk5vU2hhZG93aW5nT3JPdmVycmlkZXNPbk1ldGhvZHNSd\" +\n+ \"WxlJDEBKE5vU2hhZG93aW5nT3JPdmVycmlkZXNPbk1ldGhvZHNSdWxlLmphdmEIZXZhbHVhdGUoOWNvbS5jYXJyb3RzZWFyY2gucmFuZG9taXpl\" +\n+ \"ZHRlc3RpbmcucnVsZXMuU3RhdGVtZW50QWRhcHRlcgEVU3RhdGVtZW50QWRhcHRlci5qYXZhCGV2YWx1YXRlJDljb20uY2Fycm90c2VhcmNoLnJ\" +\n+ \"hbmRvbWl6ZWR0ZXN0aW5nLnJ1bGVzLlN0YXRlbWVudEFkYXB0ZXIBFVN0YXRlbWVudEFkYXB0ZXIuamF2YQhldmFsdWF0ZSQ5Y29tLmNhcnJvdH\" +\n+ \"NlYXJjaC5yYW5kb21pemVkdGVzdGluZy5ydWxlcy5TdGF0ZW1lbnRBZGFwdGVyARVTdGF0ZW1lbnRBZGFwdGVyLmphdmEIZXZhbHVhdGUkM29yZ\" +\n+ \"y5hcGFjaGUubHVjZW5lLnV0aWwuVGVzdFJ1bGVBc3NlcnRpb25zUmVxdWlyZWQkMQEfVGVzdFJ1bGVBc3NlcnRpb25zUmVxdWlyZWQuamF2YQhl\" +\n+ \"dmFsdWF0ZTUsb3JnLmFwYWNoZS5sdWNlbmUudXRpbC5UZXN0UnVsZU1hcmtGYWlsdXJlJDEBGFRlc3RSdWxlTWFya0ZhaWx1cmUuamF2YQhldmF\" +\n+ \"sdWF0ZS83b3JnLmFwYWNoZS5sdWNlbmUudXRpbC5UZXN0UnVsZUlnbm9yZUFmdGVyTWF4RmFpbHVyZXMkMQEjVGVzdFJ1bGVJZ25vcmVBZnRlck\" +\n+ \"1heEZhaWx1cmVzLmphdmEIZXZhbHVhdGVAMW9yZy5hcGFjaGUubHVjZW5lLnV0aWwuVGVzdFJ1bGVJZ25vcmVUZXN0U3VpdGVzJDEBHVRlc3RSd\" +\n+ \"WxlSWdub3JlVGVzdFN1aXRlcy5qYXZhCGV2YWx1YXRlNjljb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLnJ1bGVzLlN0YXRlbWVu\" +\n+ \"dEFkYXB0ZXIBFVN0YXRlbWVudEFkYXB0ZXIuamF2YQhldmFsdWF0ZSREY29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkdGVzdGluZy5UaHJlYWR\" +\n+ \"MZWFrQ29udHJvbCRTdGF0ZW1lbnRSdW5uZXIBFlRocmVhZExlYWtDb250cm9sLmphdmEDcnVu7wIQamF2YS5sYW5nLlRocmVhZAELVGhyZWFkLm\" +\n+ \"phdmEDcnVu6QUABAdoZWFkZXIyAQZ2YWx1ZTIKZXMuaGVhZGVyMwEGdmFsdWUzB2hlYWRlcjEBBnZhbHVlMQplcy5oZWFkZXI0AQZ2YWx1ZTQAA\" +\n+ 
\"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\" +\n+ \"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\" +\n+ \"AAAAA\"));\n+\n+ try (StreamInput in = decoded.streamInput()) {\n+ //randomize the version across released and unreleased ones\n+ Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2,\n+ Version.V_5_0_3_UNRELEASED, Version.V_5_1_1_UNRELEASED, Version.V_5_1_2_UNRELEASED, Version.V_5_2_0_UNRELEASED);\n+ in.setVersion(version);\n+ ElasticsearchException exception = new ElasticsearchException(in);\n+ assertEquals(\"test message\", exception.getMessage());\n+ //the headers received as part of a single set get split based on their prefix\n+ assertEquals(2, exception.getHeaderKeys().size());\n+ assertEquals(\"value1\", exception.getHeader(\"header1\").get(0));\n+ assertEquals(\"value2\", exception.getHeader(\"header2\").get(0));\n+ assertEquals(2, exception.getMetadataKeys().size());\n+ assertEquals(\"value3\", exception.getMetadata(\"es.header3\").get(0));\n+ assertEquals(\"value4\", exception.getMetadata(\"es.header4\").get(0));\n+ }\n+ }\n \n+ private static class UnknownException extends Exception {\n+ UnknownException(final String message, final Exception cause) {\n+ super(message, cause);\n+ }\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java", "status": "modified" }, { "diff": "@@ -340,10 +340,16 @@ private static void assertThrowable(XContentParser parser, Throwable cause) thro\n ElasticsearchException ex = (ElasticsearchException) cause;\n for (String name : ex.getHeaderKeys()) {\n assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());\n- assertEquals(name.replaceFirst(\"es.\", \"\"), parser.currentName());\n+ assertEquals(name, parser.currentName());\n assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken());\n assertEquals(ex.getHeader(name).get(0), parser.text());\n }\n+ for (String name : ex.getMetadataKeys()) {\n+ assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());\n+ assertEquals(name.replaceFirst(\"es.\", \"\"), parser.currentName());\n+ assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken());\n+ assertEquals(ex.getMetadata(name).get(0), parser.text());\n+ }\n if (ex.getCause() != null) {\n assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());\n assertEquals(\"caused_by\", parser.currentName());", "filename": "core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java", "status": "modified" }, { "diff": "@@ -45,18 +45,17 @@\n public class BytesRestResponseTests extends ESTestCase {\n \n class UnknownException extends Exception {\n-\n- public UnknownException(final String message, final Throwable cause) {\n+ UnknownException(final String message, final Throwable cause) {\n super(message, cause);\n }\n-\n }\n \n public void testWithHeaders() throws Exception {\n RestRequest request = new FakeRestRequest();\n RestChannel channel = randomBoolean() ? 
new DetailedExceptionRestChannel(request) : new SimpleExceptionRestChannel(request);\n \n BytesRestResponse response = new BytesRestResponse(channel, new WithHeadersException());\n+ assertEquals(2, response.getHeaders().size());\n assertThat(response.getHeaders().get(\"n1\"), notNullValue());\n assertThat(response.getHeaders().get(\"n1\"), contains(\"v11\", \"v12\"));\n assertThat(response.getHeaders().get(\"n2\"), notNullValue());\n@@ -217,6 +216,7 @@ public static class WithHeadersException extends ElasticsearchException {\n super(\"\");\n this.addHeader(\"n1\", \"v11\", \"v12\");\n this.addHeader(\"n2\", \"v21\", \"v22\");\n+ this.addMetadata(\"es.test\", \"value1\", \"value2\");\n }\n }\n ", "filename": "core/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java", "status": "modified" }, { "diff": "@@ -59,11 +59,11 @@ Object getObjectToExplain() {\n /**\n * Headers to be added to the {@link ScriptException} for structured rendering.\n */\n- Map<String, List<String>> getHeaders() {\n- Map<String, List<String>> headers = new TreeMap<>();\n- headers.put(\"es.class\", singletonList(objectToExplain == null ? \"null\" : objectToExplain.getClass().getName()));\n- headers.put(\"es.to_string\", singletonList(Objects.toString(objectToExplain)));\n- return headers;\n+ Map<String, List<String>> getMetadata() {\n+ Map<String, List<String>> metadata = new TreeMap<>();\n+ metadata.put(\"es.class\", singletonList(objectToExplain == null ? \"null\" : objectToExplain.getClass().getName()));\n+ metadata.put(\"es.to_string\", singletonList(Objects.toString(objectToExplain)));\n+ return metadata;\n }\n }\n }", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/Debug.java", "status": "modified" }, { "diff": "@@ -123,7 +123,7 @@ public Object run() {\n return executable.execute(variables, scorer, doc, aggregationValue);\n // Note that it is safe to catch any of the following errors since Painless is stateless.\n } catch (Debug.PainlessExplainError e) {\n- throw convertToScriptException(e, e.getHeaders());\n+ throw convertToScriptException(e, e.getMetadata());\n } catch (PainlessError | BootstrapMethodError | OutOfMemoryError | StackOverflowError | Exception e) {\n throw convertToScriptException(e, emptyMap());\n }\n@@ -135,7 +135,7 @@ public Object run() {\n * @param t The throwable to build an exception around.\n * @return The generated ScriptException.\n */\n- private ScriptException convertToScriptException(Throwable t, Map<String, List<String>> headers) {\n+ private ScriptException convertToScriptException(Throwable t, Map<String, List<String>> metadata) {\n // create a script stack: this is just the script portion\n List<String> scriptStack = new ArrayList<>();\n for (StackTraceElement element : t.getStackTrace()) {\n@@ -179,8 +179,8 @@ private ScriptException convertToScriptException(Throwable t, Map<String, List<S\n name = executable.getName();\n }\n ScriptException scriptException = new ScriptException(\"runtime error\", t, scriptStack, name, PainlessScriptEngineService.NAME);\n- for (Map.Entry<String, List<String>> header : headers.entrySet()) {\n- scriptException.addHeader(header.getKey(), header.getValue());\n+ for (Map.Entry<String, List<String>> entry : metadata.entrySet()) {\n+ scriptException.addMetadata(entry.getKey(), entry.getValue());\n }\n return scriptException;\n }", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptImpl.java", "status": "modified" }, { "diff": "@@ -39,14 +39,14 @@ public void testExplain() {\n 
Debug.PainlessExplainError e = expectScriptThrows(Debug.PainlessExplainError.class, () -> exec(\n \"Debug.explain(params.a)\", params, true));\n assertSame(dummy, e.getObjectToExplain());\n- assertThat(e.getHeaders(), hasEntry(\"es.class\", singletonList(\"java.lang.Object\")));\n- assertThat(e.getHeaders(), hasEntry(\"es.to_string\", singletonList(dummy.toString())));\n+ assertThat(e.getMetadata(), hasEntry(\"es.class\", singletonList(\"java.lang.Object\")));\n+ assertThat(e.getMetadata(), hasEntry(\"es.to_string\", singletonList(dummy.toString())));\n \n // Null should be ok\n e = expectScriptThrows(Debug.PainlessExplainError.class, () -> exec(\"Debug.explain(null)\"));\n assertNull(e.getObjectToExplain());\n- assertThat(e.getHeaders(), hasEntry(\"es.class\", singletonList(\"null\")));\n- assertThat(e.getHeaders(), hasEntry(\"es.to_string\", singletonList(\"null\")));\n+ assertThat(e.getMetadata(), hasEntry(\"es.class\", singletonList(\"null\")));\n+ assertThat(e.getMetadata(), hasEntry(\"es.to_string\", singletonList(\"null\")));\n \n // You can't catch the explain exception\n e = expectScriptThrows(Debug.PainlessExplainError.class, () -> exec(\n@@ -64,15 +64,15 @@ public void testExplain() {\n public void testPainlessExplainErrorSerialization() throws IOException {\n Map<String, Object> params = singletonMap(\"a\", \"jumped over the moon\");\n ScriptException e = expectThrows(ScriptException.class, () -> exec(\"Debug.explain(params.a)\", params, true));\n- assertEquals(singletonList(\"java.lang.String\"), e.getHeader(\"es.class\"));\n- assertEquals(singletonList(\"jumped over the moon\"), e.getHeader(\"es.to_string\"));\n+ assertEquals(singletonList(\"java.lang.String\"), e.getMetadata(\"es.class\"));\n+ assertEquals(singletonList(\"jumped over the moon\"), e.getMetadata(\"es.to_string\"));\n \n try (BytesStreamOutput out = new BytesStreamOutput()) {\n out.writeException(e);\n try (StreamInput in = out.bytes().streamInput()) {\n ElasticsearchException read = (ScriptException) in.readException();\n- assertEquals(singletonList(\"java.lang.String\"), read.getHeader(\"es.class\"));\n- assertEquals(singletonList(\"jumped over the moon\"), read.getHeader(\"es.to_string\"));\n+ assertEquals(singletonList(\"java.lang.String\"), read.getMetadata(\"es.class\"));\n+ assertEquals(singletonList(\"jumped over the moon\"), read.getMetadata(\"es.to_string\"));\n }\n }\n }", "filename": "modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java", "status": "modified" } ] }
{ "body": "Since #22200 the `Settings` class now uses a static `DeprecationLogger`. This makes Log4j2 status logger yells when Elasticsearch starts with the message `ERROR StatusLogger No log4j2 configuration file found. Using default configuration` because this deprecation logger is instantiated before the status logger has been configured.\r\n\r\nThis commit changes the `LogConfigurator.configureWithoutConfig()` method so that the status logger is initialized without using any `Settings`, making the message disappear.", "comments": [ { "body": "@jasontedor I fully agree with your concerns. I was happy enough to have found the cause of this regression but this PR is not the long term solution we must have. So I'm +1 on closing this and maybe @danielmitterdorfer who worked on #22200 can implement your suggestion?\r\n\r\n", "created_at": "2017-01-19T15:52:38Z" }, { "body": "> So I'm +1 on closing this and maybe @danielmitterdorfer who worked on #22200 can implement your suggestion?\r\n\r\n@danielmitterdorfer Will you fix this along the lines I described?", "created_at": "2017-01-19T15:55:40Z" }, { "body": "I discussed this with @danielmitterdorfer via another channel and have opened #22696 as a result of that conversation.", "created_at": "2017-01-19T16:52:29Z" }, { "body": "Thanks for tracking down the cause of the issue @tlrx.", "created_at": "2017-01-19T16:52:57Z" }, { "body": "Superseded by #22696", "created_at": "2017-01-19T16:52:59Z" } ], "number": 22687, "title": "Fix status logger message at startup" }
{ "body": "This commit fixes an issue with deprecation logging for lenient booleans. The underlying issue is that adding deprecation logging for lenient booleans added a static deprecation logger to the Settings class. However, the Settings class is initialized very early and in CLI tools can be initialized before logging is initialized. This leads to status logger error messages. Additionally, the deprecation logging for a lot of the settings does not provide useful context (for example, in the token filter factories, the deprecation logging only produces the name of the setting, but gives no context which token filter factory it comes from). This commit addresses both of these issues by changing the call sites to push a deprecation logger through to the lenient boolean parsing.\r\n\r\nRelates #22200, supersedes #22687", "number": 22696, "review_comments": [ { "body": "Is this package private by intention?", "created_at": "2017-01-19T17:10:27Z" }, { "body": "No, I'm not sure what happened there, thanks for catching it.", "created_at": "2017-01-19T17:12:41Z" }, { "body": "I pushed db599a8716696f47228dfcc0b12c4315150f7d9c.", "created_at": "2017-01-19T17:14:07Z" } ], "title": "Fix deprecation logging for lenient booleans" }
{ "commits": [ { "message": "Fix deprecation logging for lenient booleans\n\nThis commit fixes an issue with deprecation logging for lenient\nbooleans. The underlying issue is that adding deprecation logging for\nlenient booleans added a static deprecation logger to the Settings\nclass. However, the Settings class is initialized very early and in CLI\ntools can be initialized before logging is initialized. This leads to\nstatus logger error messages. Additionally, the deprecation logging for\na lot of the settings does not provide useful context (for example, in\nthe token filter factories, the deprecation logging only produces the\nname of the setting, but gives no context which token filter factory it\ncomes from). This commit addresses both of these issues by changing the\ncall sites to push a deprecation logger through to the lenient boolean\nparsing." }, { "message": "Mark field in Analysis as private" } ], "files": [ { "diff": "@@ -41,6 +41,8 @@\n import org.elasticsearch.common.compress.CompressedXContent;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n+import org.elasticsearch.common.logging.DeprecationLogger;\n+import org.elasticsearch.common.logging.ESLoggerFactory;\n import org.elasticsearch.common.settings.Setting;\n import org.elasticsearch.common.settings.Setting.Property;\n import org.elasticsearch.common.settings.Settings;\n@@ -1256,6 +1258,8 @@ public static IndexMetaData fromXContent(XContentParser parser) throws IOExcepti\n }\n }\n \n+ private static final DeprecationLogger deprecationLogger = new DeprecationLogger(ESLoggerFactory.getLogger(IndexMetaData.class));\n+\n /**\n * Returns <code>true</code> iff the given settings indicate that the index\n * associated with these settings allocates it's shards on a shared\n@@ -1266,7 +1270,7 @@ public static IndexMetaData fromXContent(XContentParser parser) throws IOExcepti\n public boolean isOnSharedFilesystem(Settings settings) {\n // don't use the setting directly, not to trigger verbose deprecation logging\n return settings.getAsBooleanLenientForPreEs6Indices(\n- this.indexCreatedVersion, SETTING_SHARED_FILESYSTEM, isIndexUsingShadowReplicas(settings));\n+ this.indexCreatedVersion, SETTING_SHARED_FILESYSTEM, isIndexUsingShadowReplicas(settings), deprecationLogger);\n }\n \n /**\n@@ -1280,7 +1284,7 @@ public boolean isIndexUsingShadowReplicas() {\n \n public boolean isIndexUsingShadowReplicas(Settings settings) {\n // don't use the setting directly, not to trigger verbose deprecation logging\n- return settings.getAsBooleanLenientForPreEs6Indices(this.indexCreatedVersion, SETTING_SHADOW_REPLICAS, false);\n+ return settings.getAsBooleanLenientForPreEs6Indices(this.indexCreatedVersion, SETTING_SHADOW_REPLICAS, false, deprecationLogger);\n }\n \n /**", "filename": "core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java", "status": "modified" }, { "diff": "@@ -76,7 +76,6 @@\n * An immutable settings implementation.\n */\n public final class Settings implements ToXContent {\n- private static final DeprecationLogger deprecationLogger = new DeprecationLogger(Loggers.getLogger(Settings.class));\n \n public static final Settings EMPTY = new Builder().build();\n private static final Pattern ARRAY_PATTERN = Pattern.compile(\"(.*)\\\\.\\\\d+$\");\n@@ -327,7 +326,11 @@ public Boolean getAsBoolean(String setting, Boolean defaultValue) {\n * @deprecated Only used to provide automatic upgrades for pre 6.0 indices.\n */\n @Deprecated\n- public Boolean 
getAsBooleanLenientForPreEs6Indices(Version indexVersion, String setting, Boolean defaultValue) {\n+ public Boolean getAsBooleanLenientForPreEs6Indices(\n+ final Version indexVersion,\n+ final String setting,\n+ final Boolean defaultValue,\n+ final DeprecationLogger deprecationLogger) {\n if (indexVersion.before(Version.V_6_0_0_alpha1_UNRELEASED)) {\n //Only emit a warning if the setting's value is not a proper boolean\n final String value = get(setting, \"false\");", "filename": "core/src/main/java/org/elasticsearch/common/settings/Settings.java", "status": "modified" }, { "diff": "@@ -479,9 +479,9 @@ private boolean recoverOnAnyNode(IndexMetaData metaData) {\n // don't use the setting directly, not to trigger verbose deprecation logging\n return (metaData.isOnSharedFilesystem(metaData.getSettings()) || metaData.isOnSharedFilesystem(this.settings))\n && (metaData.getSettings().getAsBooleanLenientForPreEs6Indices(\n- metaData.getCreationVersion(), IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false) ||\n+ metaData.getCreationVersion(), IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, deprecationLogger) ||\n this.settings.getAsBooleanLenientForPreEs6Indices\n- (metaData.getCreationVersion(), IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false));\n+ (metaData.getCreationVersion(), IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, deprecationLogger));\n }\n \n protected abstract FetchResult<NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation);", "filename": "core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java", "status": "modified" }, { "diff": "@@ -22,6 +22,7 @@\n import org.apache.lucene.index.MergePolicy;\n import org.elasticsearch.Version;\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n+import org.elasticsearch.common.logging.DeprecationLogger;\n import org.elasticsearch.common.logging.Loggers;\n import org.elasticsearch.common.settings.IndexScopedSettings;\n import org.elasticsearch.common.settings.Setting;", "filename": "core/src/main/java/org/elasticsearch/index/IndexSettings.java", "status": "modified" }, { "diff": "@@ -23,6 +23,7 @@\n import org.apache.lucene.index.MergePolicy;\n import org.apache.lucene.index.NoMergePolicy;\n import org.apache.lucene.index.TieredMergePolicy;\n+import org.elasticsearch.common.logging.DeprecationLogger;\n import org.elasticsearch.common.settings.Setting;\n import org.elasticsearch.common.settings.Setting.Property;\n import org.elasticsearch.common.unit.ByteSizeUnit;\n@@ -165,7 +166,7 @@ public final class MergePolicyConfig {\n double segmentsPerTier = indexSettings.getValue(INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING);\n double reclaimDeletesWeight = indexSettings.getValue(INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING);\n this.mergesEnabled = indexSettings.getSettings()\n- .getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), INDEX_MERGE_ENABLED, true);\n+ .getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), INDEX_MERGE_ENABLED, true, new DeprecationLogger(logger));\n if (mergesEnabled == false) {\n logger.warn(\"[{}] is set to false, this should only be used in tests and can cause serious problems in production environments\", INDEX_MERGE_ENABLED);\n }", "filename": "core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java", "status": "modified" }, { "diff": "@@ -38,7 +38,7 @@ public class ASCIIFoldingTokenFilterFactory extends AbstractTokenFilterFactory i\n 
public ASCIIFoldingTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {\n super(indexSettings, name, settings);\n preserveOriginal = settings.getAsBooleanLenientForPreEs6Indices(\n- indexSettings.getIndexVersionCreated(), PRESERVE_ORIGINAL.getPreferredName(), DEFAULT_PRESERVE_ORIGINAL);\n+ indexSettings.getIndexVersionCreated(), PRESERVE_ORIGINAL.getPreferredName(), DEFAULT_PRESERVE_ORIGINAL, deprecationLogger);\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactory.java", "status": "modified" }, { "diff": "@@ -56,6 +56,8 @@\n import org.apache.lucene.util.Version;\n import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.io.FileSystemUtils;\n+import org.elasticsearch.common.logging.DeprecationLogger;\n+import org.elasticsearch.common.logging.ESLoggerFactory;\n import org.elasticsearch.common.lucene.Lucene;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.env.Environment;\n@@ -79,6 +81,8 @@\n \n public class Analysis {\n \n+ private static final DeprecationLogger deprecationLogger = new DeprecationLogger(ESLoggerFactory.getLogger(Analysis.class));\n+\n public static Version parseAnalysisVersion(Settings indexSettings, Settings settings, Logger logger) {\n // check for explicit version on the specific analyzer component\n String sVersion = settings.get(\"version\");\n@@ -179,13 +183,14 @@ public static CharArraySet parseCommonWords(Environment env, Settings settings,\n }\n \n public static CharArraySet parseArticles(Environment env, org.elasticsearch.Version indexCreatedVersion, Settings settings) {\n- boolean articlesCase = settings.getAsBooleanLenientForPreEs6Indices(indexCreatedVersion, \"articles_case\", false);\n+ boolean articlesCase = settings.getAsBooleanLenientForPreEs6Indices(indexCreatedVersion, \"articles_case\", false, deprecationLogger);\n return parseWords(env, settings, \"articles\", null, null, articlesCase);\n }\n \n public static CharArraySet parseStopWords(Environment env, org.elasticsearch.Version indexCreatedVersion, Settings settings,\n CharArraySet defaultStopWords) {\n- boolean stopwordsCase = settings.getAsBooleanLenientForPreEs6Indices(indexCreatedVersion, \"stopwords_case\", false);\n+ boolean stopwordsCase =\n+ settings.getAsBooleanLenientForPreEs6Indices(indexCreatedVersion, \"stopwords_case\", false, deprecationLogger);\n return parseStopWords(env, settings, defaultStopWords, stopwordsCase);\n }\n \n@@ -214,7 +219,8 @@ public static CharArraySet getWordSet(Environment env, org.elasticsearch.Version\n if (wordList == null) {\n return null;\n }\n- boolean ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexCreatedVersion, settingsPrefix + \"_case\", false);\n+ boolean ignoreCase =\n+ settings.getAsBooleanLenientForPreEs6Indices(indexCreatedVersion, settingsPrefix + \"_case\", false, deprecationLogger);\n return new CharArraySet(wordList, ignoreCase);\n }\n ", "filename": "core/src/main/java/org/elasticsearch/index/analysis/Analysis.java", "status": "modified" }, { "diff": "@@ -50,7 +50,8 @@ public final class CJKBigramFilterFactory extends AbstractTokenFilterFactory {\n \n public CJKBigramFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {\n super(indexSettings, name, settings);\n- outputUnigrams = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"output_unigrams\", false);\n+ outputUnigrams = 
settings.getAsBooleanLenientForPreEs6Indices(\n+ indexSettings.getIndexVersionCreated(), \"output_unigrams\", false, deprecationLogger);\n final String[] asArray = settings.getAsArray(\"ignored_scripts\");\n Set<String> scripts = new HashSet<>(Arrays.asList(\"han\", \"hiragana\", \"katakana\", \"hangul\"));\n if (asArray != null) {", "filename": "core/src/main/java/org/elasticsearch/index/analysis/CJKBigramFilterFactory.java", "status": "modified" }, { "diff": "@@ -37,8 +37,8 @@ public class CommonGramsTokenFilterFactory extends AbstractTokenFilterFactory {\n \n public CommonGramsTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {\n super(indexSettings, name, settings);\n- this.ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"ignore_case\", false);\n- this.queryMode = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"query_mode\", false);\n+ this.ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"ignore_case\", false, deprecationLogger);\n+ this.queryMode = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"query_mode\", false, deprecationLogger);\n this.words = Analysis.parseCommonWords(env, settings, null, ignoreCase);\n \n if (this.words == null) {", "filename": "core/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java", "status": "modified" }, { "diff": "@@ -46,8 +46,9 @@ public HunspellTokenFilterFactory(IndexSettings indexSettings, String name, Sett\n throw new IllegalArgumentException(String.format(Locale.ROOT, \"Unknown hunspell dictionary for locale [%s]\", locale));\n }\n \n- dedup = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"dedup\", true);\n- longestOnly = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"longest_only\", false);\n+ dedup = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"dedup\", true, deprecationLogger);\n+ longestOnly =\n+ settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"longest_only\", false, deprecationLogger);\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactory.java", "status": "modified" }, { "diff": "@@ -35,7 +35,8 @@ public class KeywordMarkerTokenFilterFactory extends AbstractTokenFilterFactory\n public KeywordMarkerTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {\n super(indexSettings, name, settings);\n \n- boolean ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"ignore_case\", false);\n+ boolean ignoreCase =\n+ settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"ignore_case\", false, deprecationLogger);\n Set<?> rules = Analysis.getWordSet(env, indexSettings.getIndexVersionCreated(), settings, \"keywords\");\n if (rules == null) {\n throw new IllegalArgumentException(\"keyword filter requires either `keywords` or `keywords_path` to be configured\");\n@@ -47,4 +48,5 @@ public KeywordMarkerTokenFilterFactory(IndexSettings indexSettings, Environment\n public TokenStream create(TokenStream tokenStream) {\n return new SetKeywordMarkerFilter(tokenStream, keywordLookup);\n }\n+\n }", "filename": 
"core/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java", "status": "modified" }, { "diff": "@@ -37,7 +37,7 @@ public LimitTokenCountFilterFactory(IndexSettings indexSettings, Environment env\n super(indexSettings, name, settings);\n this.maxTokenCount = settings.getAsInt(\"max_token_count\", DEFAULT_MAX_TOKEN_COUNT);\n this.consumeAllTokens = settings.getAsBooleanLenientForPreEs6Indices(\n- indexSettings.getIndexVersionCreated(), \"consume_all_tokens\", DEFAULT_CONSUME_ALL_TOKENS);\n+ indexSettings.getIndexVersionCreated(), \"consume_all_tokens\", DEFAULT_CONSUME_ALL_TOKENS, deprecationLogger);\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactory.java", "status": "modified" }, { "diff": "@@ -56,7 +56,8 @@ public PathHierarchyTokenizerFactory(IndexSettings indexSettings, Environment en\n this.replacement = replacement.charAt(0);\n }\n this.skip = settings.getAsInt(\"skip\", PathHierarchyTokenizer.DEFAULT_SKIP);\n- this.reverse = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"reverse\", false);\n+ this.reverse =\n+ settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"reverse\", false, deprecationLogger);\n }\n \n @Override\n@@ -66,4 +67,5 @@ public Tokenizer create() {\n }\n return new PathHierarchyTokenizer(bufferSize, delimiter, replacement, skip);\n }\n+\n }", "filename": "core/src/main/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactory.java", "status": "modified" }, { "diff": "@@ -36,7 +36,8 @@ public PatternAnalyzerProvider(IndexSettings indexSettings, Environment env, Str\n super(indexSettings, name, settings);\n \n final CharArraySet defaultStopwords = CharArraySet.EMPTY_SET;\n- boolean lowercase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"lowercase\", true);\n+ boolean lowercase =\n+ settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"lowercase\", true, deprecationLogger);\n CharArraySet stopWords = Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, defaultStopwords);\n \n String sPattern = settings.get(\"pattern\", \"\\\\W+\" /*PatternAnalyzer.NON_WORD_PATTERN*/);", "filename": "core/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java", "status": "modified" }, { "diff": "@@ -45,7 +45,8 @@ public PatternCaptureGroupTokenFilterFactory(IndexSettings indexSettings, Enviro\n patterns[i] = Pattern.compile(regexes[i]);\n }\n \n- preserveOriginal = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), PRESERVE_ORIG_KEY, true);\n+ preserveOriginal = settings.getAsBooleanLenientForPreEs6Indices(\n+ indexSettings.getIndexVersionCreated(), PRESERVE_ORIG_KEY, true, deprecationLogger);\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/index/analysis/PatternCaptureGroupTokenFilterFactory.java", "status": "modified" }, { "diff": "@@ -43,7 +43,7 @@ public PatternReplaceTokenFilterFactory(IndexSettings indexSettings, Environment\n }\n this.pattern = Regex.compile(sPattern, settings.get(\"flags\"));\n this.replacement = settings.get(\"replacement\", \"\");\n- this.all = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"all\", true);\n+ this.all = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"all\", true, deprecationLogger);\n }\n \n @Override", "filename": 
"core/src/main/java/org/elasticsearch/index/analysis/PatternReplaceTokenFilterFactory.java", "status": "modified" }, { "diff": "@@ -33,8 +33,8 @@ public ShingleTokenFilterFactory(IndexSettings indexSettings, Environment enviro\n super(indexSettings, name, settings);\n Integer maxShingleSize = settings.getAsInt(\"max_shingle_size\", ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE);\n Integer minShingleSize = settings.getAsInt(\"min_shingle_size\", ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE);\n- Boolean outputUnigrams = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"output_unigrams\", true);\n- Boolean outputUnigramsIfNoShingles = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"output_unigrams_if_no_shingles\", false);\n+ Boolean outputUnigrams = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"output_unigrams\", true, deprecationLogger);\n+ Boolean outputUnigramsIfNoShingles = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"output_unigrams_if_no_shingles\", false, deprecationLogger);\n String tokenSeparator = settings.get(\"token_separator\", ShingleFilter.DEFAULT_TOKEN_SEPARATOR);\n String fillerToken = settings.get(\"filler_token\", ShingleFilter.DEFAULT_FILLER_TOKEN);\n factory = new Factory(\"shingle\", minShingleSize, maxShingleSize, outputUnigrams, outputUnigramsIfNoShingles, tokenSeparator, fillerToken);", "filename": "core/src/main/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactory.java", "status": "modified" }, { "diff": "@@ -40,8 +40,10 @@ public class StopTokenFilterFactory extends AbstractTokenFilterFactory {\n \n public StopTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {\n super(indexSettings, name, settings);\n- this.ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"ignore_case\", false);\n- this.removeTrailing = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"remove_trailing\", true);\n+ this.ignoreCase =\n+ settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"ignore_case\", false, deprecationLogger);\n+ this.removeTrailing = settings.getAsBooleanLenientForPreEs6Indices(\n+ indexSettings.getIndexVersionCreated(), \"remove_trailing\", true, deprecationLogger);\n this.stopWords = Analysis.parseStopWords(env, settings, StopAnalyzer.ENGLISH_STOP_WORDS_SET, ignoreCase);\n if (settings.get(\"enable_position_increments\") != null) {\n throw new IllegalArgumentException(\"enable_position_increments is not supported anymore. 
Please fix your analysis chain\");", "filename": "core/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java", "status": "modified" }, { "diff": "@@ -61,8 +61,10 @@ public SynonymTokenFilterFactory(IndexSettings indexSettings, Environment env, A\n throw new IllegalArgumentException(\"synonym requires either `synonyms` or `synonyms_path` to be configured\");\n }\n \n- this.ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"ignore_case\", false);\n- boolean expand = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"expand\", true);\n+ this.ignoreCase =\n+ settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"ignore_case\", false, deprecationLogger);\n+ boolean expand =\n+ settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"expand\", true, deprecationLogger);\n \n String tokenizerName = settings.get(\"tokenizer\", \"whitespace\");\n AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory =", "filename": "core/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java", "status": "modified" }, { "diff": "@@ -32,7 +32,7 @@ public class UniqueTokenFilterFactory extends AbstractTokenFilterFactory {\n public UniqueTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {\n super(indexSettings, name, settings);\n this.onlyOnSamePosition = settings.getAsBooleanLenientForPreEs6Indices(\n- indexSettings.getIndexVersionCreated(), \"only_on_same_position\", false);\n+ indexSettings.getIndexVersionCreated(), \"only_on_same_position\", false, deprecationLogger);\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/index/analysis/UniqueTokenFilterFactory.java", "status": "modified" }, { "diff": "@@ -101,7 +101,7 @@ public TokenStream create(TokenStream tokenStream) {\n }\n \n public int getFlag(int flag, Settings settings, String key, boolean defaultValue) {\n- if (settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), key, defaultValue)) {\n+ if (settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), key, defaultValue, deprecationLogger)) {\n return flag;\n }\n return 0;", "filename": "core/src/main/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactory.java", "status": "modified" }, { "diff": "@@ -45,7 +45,7 @@ public AbstractCompoundWordTokenFilterFactory(IndexSettings indexSettings, Envir\n minSubwordSize = settings.getAsInt(\"min_subword_size\", CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE);\n maxSubwordSize = settings.getAsInt(\"max_subword_size\", CompoundWordTokenFilterBase.DEFAULT_MAX_SUBWORD_SIZE);\n onlyLongestMatch = settings\n- .getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"only_longest_match\", false);\n+ .getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"only_longest_match\", false, deprecationLogger);\n wordList = Analysis.getWordSet(env, indexSettings.getIndexVersionCreated(), settings, \"word_list\");\n if (wordList == null) {\n throw new IllegalArgumentException(\"word_list must be provided for [\" + name + \"], either as a path to a file, or directly\");", "filename": "core/src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java", "status": "modified" }, { "diff": "@@ -22,6 +22,8 @@\n import 
org.apache.lucene.search.similarities.BM25Similarity;\n import org.apache.lucene.search.similarities.Similarity;\n import org.elasticsearch.Version;\n+import org.elasticsearch.common.logging.DeprecationLogger;\n+import org.elasticsearch.common.logging.ESLoggerFactory;\n import org.elasticsearch.common.settings.Settings;\n \n /**\n@@ -43,8 +45,9 @@ public BM25SimilarityProvider(String name, Settings settings, Settings indexSett\n super(name);\n float k1 = settings.getAsFloat(\"k1\", 1.2f);\n float b = settings.getAsFloat(\"b\", 0.75f);\n- boolean discountOverlaps = settings.getAsBooleanLenientForPreEs6Indices(\n- Version.indexCreated(indexSettings), \"discount_overlaps\", true);\n+ final DeprecationLogger deprecationLogger = new DeprecationLogger(ESLoggerFactory.getLogger(getClass()));\n+ boolean discountOverlaps =\n+ settings.getAsBooleanLenientForPreEs6Indices(Version.indexCreated(indexSettings), \"discount_overlaps\", true, deprecationLogger);\n \n this.similarity = new BM25Similarity(k1, b);\n this.similarity.setDiscountOverlaps(discountOverlaps);\n@@ -57,4 +60,5 @@ public BM25SimilarityProvider(String name, Settings settings, Settings indexSett\n public Similarity get() {\n return similarity;\n }\n+\n }", "filename": "core/src/main/java/org/elasticsearch/index/similarity/BM25SimilarityProvider.java", "status": "modified" }, { "diff": "@@ -21,6 +21,8 @@\n \n import org.apache.lucene.search.similarities.ClassicSimilarity;\n import org.elasticsearch.Version;\n+import org.elasticsearch.common.logging.DeprecationLogger;\n+import org.elasticsearch.common.logging.ESLoggerFactory;\n import org.elasticsearch.common.settings.Settings;\n \n /**\n@@ -39,7 +41,7 @@ public class ClassicSimilarityProvider extends AbstractSimilarityProvider {\n public ClassicSimilarityProvider(String name, Settings settings, Settings indexSettings) {\n super(name);\n boolean discountOverlaps = settings.getAsBooleanLenientForPreEs6Indices(\n- Version.indexCreated(indexSettings), \"discount_overlaps\", true);\n+ Version.indexCreated(indexSettings), \"discount_overlaps\", true, new DeprecationLogger(ESLoggerFactory.getLogger(getClass())));\n this.similarity.setDiscountOverlaps(discountOverlaps);\n }\n \n@@ -50,4 +52,5 @@ public ClassicSimilarityProvider(String name, Settings settings, Settings indexS\n public ClassicSimilarity get() {\n return similarity;\n }\n+\n }", "filename": "core/src/main/java/org/elasticsearch/index/similarity/ClassicSimilarityProvider.java", "status": "modified" }, { "diff": "@@ -26,6 +26,8 @@\n import org.apache.lucene.search.similarities.IndependenceStandardized;\n import org.apache.lucene.search.similarities.Similarity;\n import org.elasticsearch.Version;\n+import org.elasticsearch.common.logging.DeprecationLogger;\n+import org.elasticsearch.common.logging.ESLoggerFactory;\n import org.elasticsearch.common.settings.Settings;\n \n import java.util.HashMap;\n@@ -59,7 +61,7 @@ public class DFISimilarityProvider extends AbstractSimilarityProvider {\n public DFISimilarityProvider(String name, Settings settings, Settings indexSettings) {\n super(name);\n boolean discountOverlaps = settings.getAsBooleanLenientForPreEs6Indices(\n- Version.indexCreated(indexSettings), \"discount_overlaps\", true);\n+ Version.indexCreated(indexSettings), \"discount_overlaps\", true, new DeprecationLogger(ESLoggerFactory.getLogger(getClass())));\n Independence measure = parseIndependence(settings);\n this.similarity = new DFISimilarity(measure);\n this.similarity.setDiscountOverlaps(discountOverlaps);", "filename": 
"core/src/main/java/org/elasticsearch/index/similarity/DFISimilarityProvider.java", "status": "modified" }, { "diff": "@@ -21,6 +21,8 @@\n \n import org.elasticsearch.Version;\n import org.elasticsearch.common.Booleans;\n+import org.elasticsearch.common.logging.DeprecationLogger;\n+import org.elasticsearch.common.logging.ESLoggerFactory;\n import org.elasticsearch.common.settings.loader.YamlSettingsLoader;\n import org.elasticsearch.test.ESTestCase;\n import org.hamcrest.Matchers;\n@@ -163,9 +165,11 @@ public void testLenientBooleanForPreEs6Index() throws IOException {\n .put(\"foo\", falsy)\n .put(\"bar\", truthy).build();\n \n- assertFalse(settings.getAsBooleanLenientForPreEs6Indices(Version.V_5_0_0, \"foo\", null));\n- assertTrue(settings.getAsBooleanLenientForPreEs6Indices(Version.V_5_0_0, \"bar\", null));\n- assertTrue(settings.getAsBooleanLenientForPreEs6Indices(Version.V_5_0_0, \"baz\", true));\n+ final DeprecationLogger deprecationLogger = new DeprecationLogger(ESLoggerFactory.getLogger(\"testLenientBooleanForPreEs6Index\"));\n+\n+ assertFalse(settings.getAsBooleanLenientForPreEs6Indices(Version.V_5_0_0, \"foo\", null, deprecationLogger));\n+ assertTrue(settings.getAsBooleanLenientForPreEs6Indices(Version.V_5_0_0, \"bar\", null, deprecationLogger));\n+ assertTrue(settings.getAsBooleanLenientForPreEs6Indices(Version.V_5_0_0, \"baz\", true, deprecationLogger));\n \n List<String> expectedDeprecationWarnings = new ArrayList<>();\n if (Booleans.isBoolean(falsy) == false) {\n@@ -191,10 +195,12 @@ public void testInvalidLenientBooleanForCurrentIndexVersion() {\n .put(\"foo\", falsy)\n .put(\"bar\", truthy).build();\n \n+ final DeprecationLogger deprecationLogger =\n+ new DeprecationLogger(ESLoggerFactory.getLogger(\"testInvalidLenientBooleanForCurrentIndexVersion\"));\n expectThrows(IllegalArgumentException.class,\n- () -> settings.getAsBooleanLenientForPreEs6Indices(Version.CURRENT, \"foo\", null));\n+ () -> settings.getAsBooleanLenientForPreEs6Indices(Version.CURRENT, \"foo\", null, deprecationLogger));\n expectThrows(IllegalArgumentException.class,\n- () -> settings.getAsBooleanLenientForPreEs6Indices(Version.CURRENT, \"bar\", null));\n+ () -> settings.getAsBooleanLenientForPreEs6Indices(Version.CURRENT, \"bar\", null, deprecationLogger));\n }\n \n @SuppressWarnings(\"deprecation\") //#getAsBooleanLenientForPreEs6Indices is the test subject\n@@ -203,9 +209,11 @@ public void testValidLenientBooleanForCurrentIndexVersion() {\n .put(\"foo\", \"false\")\n .put(\"bar\", \"true\").build();\n \n- assertFalse(settings.getAsBooleanLenientForPreEs6Indices(Version.CURRENT, \"foo\", null));\n- assertTrue(settings.getAsBooleanLenientForPreEs6Indices(Version.CURRENT, \"bar\", null));\n- assertTrue(settings.getAsBooleanLenientForPreEs6Indices(Version.CURRENT, \"baz\", true));\n+ final DeprecationLogger deprecationLogger =\n+ new DeprecationLogger(ESLoggerFactory.getLogger(\"testValidLenientBooleanForCurrentIndexVersion\"));\n+ assertFalse(settings.getAsBooleanLenientForPreEs6Indices(Version.CURRENT, \"foo\", null, deprecationLogger));\n+ assertTrue(settings.getAsBooleanLenientForPreEs6Indices(Version.CURRENT, \"bar\", null, deprecationLogger));\n+ assertTrue(settings.getAsBooleanLenientForPreEs6Indices(Version.CURRENT, \"baz\", true, deprecationLogger));\n }\n \n public void testMultLevelGetPrefix() {", "filename": "core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java", "status": "modified" }, { "diff": "@@ -131,7 +131,7 @@ public IcuCollationTokenFilterFactory(IndexSettings 
indexSettings, Environment e\n }\n }\n \n- Boolean caseLevel = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"caseLevel\", null);\n+ Boolean caseLevel = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"caseLevel\", null, deprecationLogger);\n if (caseLevel != null) {\n rbc.setCaseLevel(caseLevel);\n }\n@@ -147,7 +147,7 @@ public IcuCollationTokenFilterFactory(IndexSettings indexSettings, Environment e\n }\n }\n \n- Boolean numeric = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"numeric\", null);\n+ Boolean numeric = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"numeric\", null, deprecationLogger);\n if (numeric != null) {\n rbc.setNumericCollation(numeric);\n }\n@@ -158,7 +158,7 @@ public IcuCollationTokenFilterFactory(IndexSettings indexSettings, Environment e\n }\n \n Boolean hiraganaQuaternaryMode = settings\n- .getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"hiraganaQuaternaryMode\", null);\n+ .getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"hiraganaQuaternaryMode\", null, deprecationLogger);\n if (hiraganaQuaternaryMode != null) {\n rbc.setHiraganaQuaternary(hiraganaQuaternaryMode);\n }", "filename": "plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuCollationTokenFilterFactory.java", "status": "modified" }, { "diff": "@@ -45,9 +45,9 @@ public class JapaneseStopTokenFilterFactory extends AbstractTokenFilterFactory{\n \n public JapaneseStopTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {\n super(indexSettings, name, settings);\n- this.ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"ignore_case\", false);\n+ this.ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"ignore_case\", false, deprecationLogger);\n this.removeTrailing = settings\n- .getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"remove_trailing\", true);\n+ .getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"remove_trailing\", true, deprecationLogger);\n this.stopWords = Analysis.parseWords(env, settings, \"stopwords\", JapaneseAnalyzer.getDefaultStopSet(), NAMED_STOP_WORDS, ignoreCase);\n }\n ", "filename": "plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/JapaneseStopTokenFilterFactory.java", "status": "modified" }, { "diff": "@@ -34,9 +34,9 @@ public class KuromojiIterationMarkCharFilterFactory extends AbstractCharFilterFa\n public KuromojiIterationMarkCharFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {\n super(indexSettings, name);\n normalizeKanji = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"normalize_kanji\",\n- JapaneseIterationMarkCharFilter.NORMALIZE_KANJI_DEFAULT);\n+ JapaneseIterationMarkCharFilter.NORMALIZE_KANJI_DEFAULT, deprecationLogger);\n normalizeKana = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"normalize_kana\",\n- JapaneseIterationMarkCharFilter.NORMALIZE_KANA_DEFAULT);\n+ JapaneseIterationMarkCharFilter.NORMALIZE_KANA_DEFAULT, deprecationLogger);\n }\n \n @Override", "filename": "plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiIterationMarkCharFilterFactory.java", "status": 
"modified" }, { "diff": "@@ -31,7 +31,8 @@ public class KuromojiReadingFormFilterFactory extends AbstractTokenFilterFactory\n \n public KuromojiReadingFormFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {\n super(indexSettings, name, settings);\n- useRomaji = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"use_romaji\", false);\n+ useRomaji =\n+ settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), \"use_romaji\", false, deprecationLogger);\n }\n \n @Override", "filename": "plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiReadingFormFilterFactory.java", "status": "modified" } ] }
{ "body": "Elasticsearch version: 5.0.2\r\nPlugins installed: analysis-stempel 5.0,2\r\nJVM version: 1.8.0_111\r\nOS version: Ubuntu 14.04.1 LTS\r\n\r\nI have problem with indexing data when analysis-stempel is set as fields analyser.\r\n\r\nWhile indexing documents at random receives an error\r\nArray index out of range: -1\r\nArray index out of range: -2\r\n\r\nTrace from log:\r\n\r\n```\r\njava.lang.ArrayIndexOutOfBoundsException: Array index out of range: -2\r\n at java.util.Arrays.rangeCheck(Arrays.java:117) ~[?:1.8.0_111]\r\n at java.util.Arrays.fill(Arrays.java:2973) ~[?:1.8.0_111]\r\n at java.lang.AbstractStringBuilder.setLength(AbstractStringBuilder.java:211) ~[?:1.8.0_111]\r\n at java.lang.StringBuilder.setLength(StringBuilder.java:76) ~[?:1.8.0_111]\r\n at org.apache.lucene.analysis.stempel.StempelStemmer.stem(StempelStemmer.java:91) ~[?:?]\r\n at org.apache.lucene.analysis.stempel.StempelFilter.incrementToken(StempelFilter.java:74) ~[?:?]\r\n at org.apache.lucene.analysis.shingle.ShingleFilter.getNextToken(ShingleFilter.java:390) ~[lucene-analyzers-common-6.2.1.jar:6.2.1 43ab70147eb494324a1410f7a9f16a896a59bc6f - shalin - 2016-09-15 05:15:51]\r\n at org.apache.lucene.analysis.shingle.ShingleFilter.shiftInputWindow(ShingleFilter.java:467) ~[lucene-analyzers-common-6.2.1.jar:6.2.1 43ab70147eb494324a1410f7a9f16a896a59bc6f - shalin - 2016-09-15 05:15:51]\r\n at org.apache.lucene.analysis.shingle.ShingleFilter.incrementToken(ShingleFilter.java:308) ~[lucene-analyzers-common-6.2.1.jar:6.2.1 43ab70147eb494324a1410f7a9f16a896a59bc6f - shalin - 2016-09-15 05:15:51]\r\n at org.apache.lucene.index.DefaultIndexingChain$PerField.invert(DefaultIndexingChain.java:714) ~[lucene-core-6.2.1.jar:6.2.1 43ab70147eb494324a1410f7a9f16a896a59bc6f - shalin - 2016-09-15 05:15:20]\r\n at org.apache.lucene.index.DefaultIndexingChain.processField(DefaultIndexingChain.java:417) ~[lucene-core-6.2.1.jar:6.2.1 43ab70147eb494324a1410f7a9f16a896a59bc6f - shalin - 2016-09-15 05:15:20]\r\n at org.apache.lucene.index.DefaultIndexingChain.processDocument(DefaultIndexingChain.java:373) ~[lucene-core-6.2.1.jar:6.2.1 43ab70147eb494324a1410f7a9f16a896a59bc6f - shalin - 2016-09-15 05:15:20]\r\n at org.apache.lucene.index.DocumentsWriterPerThread.updateDocument(DocumentsWriterPerThread.java:231) ~[lucene-core-6.2.1.jar:6.2.1 43ab70147eb494324a1410f7a9f16a896a59bc6f - shalin - 2016-09-15 05:15:20]\r\n at org.apache.lucene.index.DocumentsWriter.updateDocument(DocumentsWriter.java:478) ~[lucene-core-6.2.1.jar:6.2.1 43ab70147eb494324a1410f7a9f16a896a59bc6f - shalin - 2016-09-15 05:15:20]\r\n at org.apache.lucene.index.IndexWriter.updateDocument(IndexWriter.java:1562) ~[lucene-core-6.2.1.jar:6.2.1 43ab70147eb494324a1410f7a9f16a896a59bc6f - shalin - 2016-09-15 05:15:20]\r\n at org.apache.lucene.index.IndexWriter.addDocument(IndexWriter.java:1307) ~[lucene-core-6.2.1.jar:6.2.1 43ab70147eb494324a1410f7a9f16a896a59bc6f - shalin - 2016-09-15 05:15:20]\r\n at org.elasticsearch.index.engine.InternalEngine.index(InternalEngine.java:558) ~[elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.index.engine.InternalEngine.innerIndex(InternalEngine.java:520) ~[elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.index.engine.InternalEngine.index(InternalEngine.java:409) ~[elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.index.shard.IndexShard.index(IndexShard.java:556) ~[elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.index.shard.IndexShard.index(IndexShard.java:546) ~[elasticsearch-5.0.2.jar:5.0.2]\r\n at 
org.elasticsearch.action.index.TransportIndexAction.executeIndexRequestOnPrimary(TransportIndexAction.java:191) ~[elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.action.bulk.TransportShardBulkAction.shardIndexOperation(TransportShardBulkAction.java:351) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.action.bulk.TransportShardBulkAction.index(TransportShardBulkAction.java:158) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.action.bulk.TransportShardBulkAction.handleItem(TransportShardBulkAction.java:137) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.action.bulk.TransportShardBulkAction.onPrimaryShard(TransportShardBulkAction.java:123) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.action.bulk.TransportShardBulkAction.onPrimaryShard(TransportShardBulkAction.java:74) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.action.support.replication.TransportWriteAction.shardOperationOnPrimary(TransportWriteAction.java:78) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.action.support.replication.TransportWriteAction.shardOperationOnPrimary(TransportWriteAction.java:50) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryShardReference.perform(TransportReplicationAction.java:903) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryShardReference.perform(TransportReplicationAction.java:873) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.action.support.replication.ReplicationOperation.execute(ReplicationOperation.java:113) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.onResponse(TransportReplicationAction.java:319) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.onResponse(TransportReplicationAction.java:254) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.action.support.replication.TransportReplicationAction$1.onResponse(TransportReplicationAction.java:839) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.action.support.replication.TransportReplicationAction$1.onResponse(TransportReplicationAction.java:836) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.index.shard.IndexShardOperationsLock.acquire(IndexShardOperationsLock.java:142) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.index.shard.IndexShard.acquirePrimaryOperationLock(IndexShard.java:1655) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.action.support.replication.TransportReplicationAction.acquirePrimaryShardReference(TransportReplicationAction.java:848) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.doRun(TransportReplicationAction.java:271) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryOperationTransportHandler.messageReceived(TransportReplicationAction.java:250) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryOperationTransportHandler.messageReceived(TransportReplicationAction.java:242) [elasticsearch-5.0.2.jar:5.0.2]\r\n at 
org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:69) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:550) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:527) [elasticsearch-5.0.2.jar:5.0.2]\r\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-5.0.2.jar:5.0.2]\r\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_111]\r\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_111]\r\n at java.lang.Thread.run(Thread.java:745) [?:1.8.0_111]\r\n```", "comments": [ { "body": "It would be really helpful if you could try and identify exactly which of your documents is causing this issue.", "created_at": "2016-12-01T14:01:58Z" }, { "body": "I get this error for random documents, each time is a different content :/\r\n\r\nsample:\r\n\r\n[2016-12-01T14:04:23,782][DEBUG][o.e.a.b.TransportShardBulkAction] [es5] [666.pb1][0] failed to execute bulk item (index) index {[666.pb1][document][224], source[{\"author\":{\"id\":580,\"name\":\"Waldemar G\"},\"indexes\":[{\"id\":19,\"name\":\"K\r\nsi▒.gi rachunkowe\",\"parent_name\":\"Rachunkowo▒.▒. i Finanse\",\"ancestors\":[{\"id\":1,\"name\":\"www\"},{\"id\":17,\"name\":\"Rachunkowo▒.▒. i Finanse\"}]}],\"keywords\":[{\"name\":\"inwentaryzacja ▒.rodków trwa▒.ych\",\"id\":420},{\"name\":\"cz▒.stotliwo▒.▒.\r\n inwentaryzacji ▒.rodków trwa▒.ych\",\"id\":516}],\"content\":\" Pytanie:  Zak▒.ad posiada monitoring. Czy z tego wzgl▒.du spis inwentarzowy ▒.rodków trwa▒.ych i pozosta▒.ych ▒.rodków można przeprowadzi▒. raz na 4 lata, czy co roku? Cz\r\ny musi by▒. zarz▒.dzenie dyrektora, np.  że inwentaryzacj▒. si▒. przeprowadza raz na 4 lata? Odpowiedź: W opisanej sytuacji do kierownika jednostki należy podj▒.cie decyzji w sprawie cz▒.stotliwo▒.ci przeprowadzania inwentaryza\r\ncji ▒.rodków trwa▒.ych. Odpowiadaj▒.c na pytanie należy podkre▒.li▒., że nie ma przepisów definiuj▒.cych poj▒.cie ▒..teren strzeżony▒... Rozdzia▒. 3 uor przedstawia jedynie ogólne uregulowania dotycz▒.ce inwentaryzacji. W zwi▒.\r\nzku z tym podj▒.cie decyzji w sprawie cz▒.stotliwo▒.ci przeprowadzania inwentaryzacji ▒.rodków trwa▒.ych znajduj▒.cych si▒. na terenie strzeżonym należy do kierownika jednostki, co powinno by▒. okre▒.lone w Pa▒.stwa instrukcji inwe\r\nntaryzacyjnej lub w zarz▒.dzeniu dyrektora (kierownika jednostki). Brak okre▒.lenia wewn▒.trznych przepisów w zakresie inwentaryzacji narusza wymagania art. 10 uor. Wi▒.cej odno▒.nie inwentaryzacji przeczyta Pan w ABC Ksi▒.goweg\r\no » Inwentaryzacja Podstawa prawna: - rozdzia▒. 3, art. 10 ustawy z 29 wrze▒.nia 1994 roku o rachunkowo▒.ci (Dz.U. z 2009 r. nr 152, poz. 1223 ze zm.) - uor. Tekst opublikowany:  26 marca 2012 r. \",\"lead\":\" Pytanie:  Za\r\nk▒.ad posiada monitoring. Czy z tego wzgl▒.du spis inwentarzowy ▒.rodków trwa▒.ych i pozosta▒.ych ▒.rodków można przeprowadzi▒. raz na 4 lata, czy co roku? Czy musi by▒. zarz▒.dzenie dyrektora, np.  że inwentaryzacj▒. si▒. przepr9,\"na\r\nowadza raz na 4 lata? \",\"seoDescription\":\"Rozdzia▒. 3 uor przedstawia jedynie ogólne uregulowania dotycz▒.ce inwentaryzacji. Jak cz▒.sto zatem przeprowadza▒. inwentaryzacj▒.?\",\"seoTitle\":\"Cz▒.stotliwo▒.▒. 
inwentaryzacji\",\"title\":\"Dec:\r\nyzja w sprawie cz▒.stotliwo▒.ci przeprowadzania inwentaryzacji ▒.rodków trwa▒.ych\",\"createdDate\":\"2012-04-30\",\"publicationDate\":\"2015-03-26\",\"validFrom\":\"2012-05-07\",\"validTo\":\"2021-11-30\",\"docType\":\"2\",\"idLanguage\":\"1\",\"isPaid\":\"0\"}\r\n]}\r\njava.lang.ArrayIndexOutOfBoundsException: Array index out of range: -2", "created_at": "2016-12-01T14:32:54Z" }, { "body": "@mikemccand does this look like a bug in Lucene?", "created_at": "2016-12-01T14:59:53Z" }, { "body": "This problem occurs only with BULK operations. Single update, index requests work fine.\r\n\r\nAnother exception is StringIndexOutOfBoundsException:\r\n\r\n```\r\njava.lang.StringIndexOutOfBoundsException: String index out of range: 4\r\n at java.lang.AbstractStringBuilder.getChars(AbstractStringBuilder.java:385) ~[?:1.8.0_111]\r\n at java.lang.StringBuilder.getChars(StringBuilder.java:76) ~[?:1.8.0_111]\r\n at org.apache.lucene.analysis.tokenattributes.CharTermAttributeImpl.append(CharTermAttributeImpl.java:183) ~[lucene-core-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:11]\r\n at org.apache.lucene.analysis.stempel.StempelFilter.incrementToken(StempelFilter.java:76) ~[?:?]\r\n at org.apache.lucene.analysis.synonym.SynonymFilter.parse(SynonymFilter.java:358) ~[lucene-analyzers-common-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:43]\r\n at org.apache.lucene.analysis.synonym.SynonymFilter.incrementToken(SynonymFilter.java:624) ~[lucene-analyzers-common-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:43]\r\n at org.apache.lucene.analysis.miscellaneous.UniqueTokenFilter.incrementToken(UniqueTokenFilter.java:53) ~[elasticsearch-5.1.1.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:43]\r\n at org.apache.lucene.index.DefaultIndexingChain$PerField.invert(DefaultIndexingChain.java:712) ~[lucene-core-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:11]\r\n at org.apache.lucene.index.DefaultIndexingChain.processField(DefaultIndexingChain.java:417) ~[lucene-core-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:11]\r\n at org.apache.lucene.index.DefaultIndexingChain.processDocument(DefaultIndexingChain.java:373) ~[lucene-core-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:11]\r\n at org.apache.lucene.index.DocumentsWriterPerThread.updateDocuments(DocumentsWriterPerThread.java:272) ~[lucene-core-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:11]\r\n at org.apache.lucene.index.DocumentsWriter.updateDocuments(DocumentsWriter.java:433) ~[lucene-core-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:11]\r\n at org.apache.lucene.index.IndexWriter.updateDocuments(IndexWriter.java:1375) ~[lucene-core-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:11]\r\n at org.elasticsearch.index.engine.InternalEngine.update(InternalEngine.java:564) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.index.engine.InternalEngine.innerIndex(InternalEngine.java:522) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.index.engine.InternalEngine.index(InternalEngine.java:409) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.index.shard.IndexShard.index(IndexShard.java:556) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\r\n at 
org.elasticsearch.index.shard.IndexShard.index(IndexShard.java:546) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\r\n at org.elasticsearch.action.index.TransportIndexAction.executeIndexRequestOnPrimary(TransportIndexAction.java:191) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.action.bulk.TransportShardBulkAction.shardIndexOperation(TransportShardBulkAction.java:348) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.action.bulk.TransportShardBulkAction.shardUpdateOperation(TransportShardBulkAction.java:411) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.action.bulk.TransportShardBulkAction.update(TransportShardBulkAction.java:233) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.action.bulk.TransportShardBulkAction.handleItem(TransportShardBulkAction.java:138) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.action.bulk.TransportShardBulkAction.onPrimaryShard(TransportShardBulkAction.java:120) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.action.bulk.TransportShardBulkAction.onPrimaryShard(TransportShardBulkAction.java:73) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.action.support.replication.TransportWriteAction.shardOperationOnPrimary(TransportWriteAction.java:75) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.action.support.replication.TransportWriteAction.shardOperationOnPrimary(TransportWriteAction.java:48) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryShardReference.perform(TransportReplicationAction.java:905) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryShardReference.perform(TransportReplicationAction.java:875) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.action.support.replication.ReplicationOperation.execute(ReplicationOperation.java:113) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.onResponse(TransportReplicationAction.java:323) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.onResponse(TransportReplicationAction.java:258) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.action.support.replication.TransportReplicationAction$1.onResponse(TransportReplicationAction.java:855) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.action.support.replication.TransportReplicationAction$1.onResponse(TransportReplicationAction.java:852) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.index.shard.IndexShardOperationsLock.acquire(IndexShardOperationsLock.java:142) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.index.shard.IndexShard.acquirePrimaryOperationLock(IndexShard.java:1655) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.action.support.replication.TransportReplicationAction.acquirePrimaryShardReference(TransportReplicationAction.java:864) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.action.support.replication.TransportReplicationAction.access$400(TransportReplicationAction.java:90) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.doRun(TransportReplicationAction.java:275) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at 
org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryOperationTransportHandler.messageReceived(TransportReplicationAction.java:254) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryOperationTransportHandler.messageReceived(TransportReplicationAction.java:246) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.xpack.security.transport.SecurityServerTransportInterceptor$ProfileSecuredRequestHandler.lambda$messageReceived$1(SecurityServerTransportInterceptor.java:208) ~[?:?]\r\n at org.elasticsearch.common.util.concurrent.EsExecutors$1.execute(EsExecutors.java:109) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.xpack.security.transport.SecurityServerTransportInterceptor$ProfileSecuredRequestHandler.lambda$messageReceived$2(SecurityServerTransportInterceptor.java:246) ~[?:?]\r\n at org.elasticsearch.action.ActionListener$1.onResponse(ActionListener.java:53) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.xpack.security.transport.ServerTransportFilter$NodeProfile.lambda$null$0(ServerTransportFilter.java:135) ~[?:?]\r\n at org.elasticsearch.xpack.security.authz.AuthorizationUtils$AsyncAuthorizer.maybeRun(AuthorizationUtils.java:131) ~[?:?]\r\n at org.elasticsearch.xpack.security.authz.AuthorizationUtils$AsyncAuthorizer.setRunAsRoles(AuthorizationUtils.java:125) ~[?:?]\r\n at org.elasticsearch.xpack.security.authz.AuthorizationUtils$AsyncAuthorizer.authorize(AuthorizationUtils.java:113) ~[?:?]\r\n at org.elasticsearch.xpack.security.transport.ServerTransportFilter$NodeProfile.lambda$inbound$1(ServerTransportFilter.java:137) ~[?:?]\r\n at org.elasticsearch.action.ActionListener$1.onResponse(ActionListener.java:53) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$authenticateAsync$0(AuthenticationService.java:180) ~[x-pack-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$lookForExistingAuthentication$2(AuthenticationService.java:199) ~[x-pack-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lookForExistingAuthentication(AuthenticationService.java:211) [x-pack-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.authenticateAsync(AuthenticationService.java:178) [x-pack-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.access$000(AuthenticationService.java:140) [x-pack-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.xpack.security.authc.AuthenticationService.authenticate(AuthenticationService.java:112) [x-pack-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.xpack.security.transport.ServerTransportFilter$NodeProfile.inbound(ServerTransportFilter.java:131) [x-pack-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.xpack.security.transport.SecurityServerTransportInterceptor$ProfileSecuredRequestHandler.messageReceived(SecurityServerTransportInterceptor.java:253) [x-pack-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:69) [elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:577) [elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:527) [elasticsearch-5.1.1.jar:5.1.1]\r\n at 
org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-5.1.1.jar:5.1.1]\r\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_111]\r\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_111]\r\n at java.lang.Thread.run(Thread.java:745) [?:1.8.0_111]\r\n```", "created_at": "2017-01-13T06:10:51Z" }, { "body": "The problem still occurs on:\r\n\r\nElasticsearch version: 5.1.2\r\nPlugins installed: analysis-stempel 5.1.2", "created_at": "2017-01-13T09:40:20Z" }, { "body": "I'll try to get to the bottom of this. Can you share your full analysis chain? From the stack traces I see at least shingle and synonym filter after the stempel stemmer, but what tokenizers/filters come before it?", "created_at": "2017-01-13T10:15:06Z" }, { "body": "OK, nevermind on the full analysis chain @mod3st, I see the issue...\r\n\r\nThere is a thread safety bug in the TokenFilterFactory: https://github.com/elastic/elasticsearch/blob/master/plugins/analysis-stempel/src/main/java/org/elasticsearch/index/analysis/pl/PolishStemTokenFilterFactory.java#L48\r\n\r\nThat `stemmer` cannot be shared across threads, but it is here. I'll open a PR to fix it to make a new stemmer in each `create` call like Lucene's factory.", "created_at": "2017-01-13T10:47:03Z" }, { "body": "I understand that in the previous version of ES every thread was creating a new instance of PolishStemTokenFilterFactory and now each thread uses the same object?\r\n\r\n(the problem doesn't occur on ES 2)", "created_at": "2017-01-13T11:17:19Z" }, { "body": "Hmm, @mod3st I still see the bug in ES 2.4.x's latest sources (the same `stemmer` is shared for all calls to `PolishStemTokenFilterFactory.create`, so as far as I can tell the bug should exist in ES 2.4 as well. Do you not see that?\r\n\r\nhttps://github.com/elastic/elasticsearch/blob/2.4/plugins/analysis-stempel/src/main/java/org/elasticsearch/index/analysis/pl/PolishStemTokenFilterFactory.java#L48", "created_at": "2017-01-13T11:26:09Z" }, { "body": "I understand, but I don't get any exceptions in ES 2.4.3 and I wonder why.", "created_at": "2017-01-13T11:32:12Z" } ], "number": 21911, "title": "5.0.2 plugin analysis-stempel Array index out of range: -2" }
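The diagnosis in the comments above is shared mutable state: the stack traces show `StempelStemmer.stem()` calling `StringBuilder.setLength()`, and a single stemmer instance was shared by every bulk-indexing thread. The toy program below (plain JDK, no Elasticsearch or Lucene code, all names invented) mimics that shape: two threads reset, append to, and trim one shared `StringBuilder`. Because `StringBuilder` is not thread safe, a run may finish, produce garbage, or have a worker die with an `ArrayIndexOutOfBoundsException` / `StringIndexOutOfBoundsException` of the nondeterministic kind reported here; it illustrates the failure mode and is not a guaranteed reproducer.

```java
// Plain-JDK toy: one non-thread-safe buffer shared by two "indexing" threads.
public class SharedBufferRace {
    public static void main(String[] args) throws InterruptedException {
        final StringBuilder shared = new StringBuilder();
        Runnable stemLikeWork = () -> {
            for (int i = 0; i < 5_000_000; i++) {
                shared.setLength(0);              // reset the buffer for the next token
                shared.append("inwentaryzacja");  // load the token
                if (shared.length() >= 3) {
                    // edit the buffer in place, roughly like applying a stemming patch
                    shared.delete(shared.length() - 3, shared.length());
                }
            }
        };
        Thread t1 = new Thread(stemLikeWork, "bulk-1");
        Thread t2 = new Thread(stemLikeWork, "bulk-2");
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        // With interleaved, unsynchronized mutation the builder's internal length
        // bookkeeping can be corrupted; whether that surfaces as an exception on a
        // given run (and in which thread) depends entirely on scheduling.
        System.out.println("done; final buffer length = " + shared.length());
    }
}
```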
{ "body": "The class is incorrectly sharing a `StempelStemmer` instance, which is not thread safe, across multiple threads. Instead it should create a new instance for each call to `create`.\r\n\r\nCloses #21911 \r\n", "number": 22610, "review_comments": [], "title": "Fix thread safety of Stempel's token filter factory" }
{ "commits": [ { "message": "add failing thread-safety test case" }, { "message": "create new stemmer for each thread" } ], "files": [ { "diff": "@@ -35,20 +35,11 @@\n \n public class PolishStemTokenFilterFactory extends AbstractTokenFilterFactory {\n \n- private final StempelStemmer stemmer;\n-\n public PolishStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {\n super(indexSettings, name, settings);\n- Trie tire;\n- try {\n- tire = StempelStemmer.load(PolishAnalyzer.class.getResourceAsStream(PolishAnalyzer.DEFAULT_STEMMER_FILE));\n- } catch (IOException ex) {\n- throw new RuntimeException(\"Unable to load default stemming tables\", ex);\n- }\n- stemmer = new StempelStemmer(tire);\n }\n \n @Override public TokenStream create(TokenStream tokenStream) {\n- return new StempelFilter(tokenStream, stemmer);\n+ return new StempelFilter(tokenStream, new StempelStemmer(PolishAnalyzer.getDefaultTable()));\n }\n }", "filename": "plugins/analysis-stempel/src/main/java/org/elasticsearch/index/analysis/pl/PolishStemTokenFilterFactory.java", "status": "modified" }, { "diff": "@@ -19,12 +19,24 @@\n \n package org.elasticsearch.index.analysis;\n \n-import org.elasticsearch.AnalysisFactoryTestCase;\n-import org.elasticsearch.index.analysis.pl.PolishStemTokenFilterFactory;\n-\n+import java.io.IOException;\n import java.util.HashMap;\n import java.util.Map;\n \n+import org.apache.lucene.analysis.Analyzer;\n+import org.apache.lucene.analysis.BaseTokenStreamTestCase;\n+import org.apache.lucene.analysis.MockTokenizer;\n+import org.apache.lucene.analysis.TokenFilter;\n+import org.apache.lucene.analysis.Tokenizer;\n+import org.elasticsearch.AnalysisFactoryTestCase;\n+import org.elasticsearch.Version;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n+import org.elasticsearch.common.UUIDs;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.env.Environment;\n+import org.elasticsearch.index.IndexSettings;\n+import org.elasticsearch.index.analysis.pl.PolishStemTokenFilterFactory;\n+\n public class AnalysisPolishFactoryTests extends AnalysisFactoryTestCase {\n \n @Override\n@@ -34,4 +46,31 @@ protected Map<String, Class<?>> getTokenFilters() {\n return filters;\n }\n \n+ public void testThreadSafety() throws IOException {\n+ // TODO: is this the right boilerplate? 
I forked this out of TransportAnalyzeAction.java:\n+ Settings settings = Settings.builder()\n+ // for _na_\n+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)\n+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)\n+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)\n+ .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())\n+ .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())\n+ .build();\n+ Environment environment = new Environment(settings);\n+ IndexMetaData metaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(settings).build();\n+ IndexSettings indexSettings = new IndexSettings(metaData, Settings.EMPTY);\n+ testThreadSafety(new PolishStemTokenFilterFactory(indexSettings, environment, \"stempelpolishstem\", settings));\n+ }\n+\n+ // TODO: move to AnalysisFactoryTestCase so we can more easily test thread safety for all factories\n+ private void testThreadSafety(TokenFilterFactory factory) throws IOException {\n+ final Analyzer analyzer = new Analyzer() {\n+ @Override\n+ protected TokenStreamComponents createComponents(String fieldName) {\n+ Tokenizer tokenizer = new MockTokenizer();\n+ return new TokenStreamComponents(tokenizer, factory.create(tokenizer));\n+ }\n+ };\n+ BaseTokenStreamTestCase.checkRandomData(random(), analyzer, 100);\n+ }\n }", "filename": "plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/AnalysisPolishFactoryTests.java", "status": "modified" } ] }
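To make the nature of the fix above concrete, here is a small hedged sketch of the pattern involved. All names here are hypothetical: "Stemmer" merely stands in for a stateful, non-thread-safe component like the Stempel stemmer, and this is not the real Lucene or Elasticsearch factory code.

```java
// Hypothetical names throughout: "Stemmer" stands in for a stateful, non-thread-safe
// component; this is not the real TokenFilterFactory or StempelStemmer code.
public class PerCallStemmerSketch {

    static final class Stemmer {
        private final StringBuilder scratch = new StringBuilder(); // unsynchronized mutable state

        String stem(String token) {
            scratch.setLength(0);
            scratch.append(token.toLowerCase());
            return scratch.toString();
        }
    }

    // Broken pattern: one instance created up front and reused by every analysis thread.
    private final Stemmer shared = new Stemmer();

    String stemWithSharedInstance(String token) {
        return shared.stem(token); // concurrent callers can interleave on 'scratch'
    }

    // Fixed pattern, mirroring the PR: build a fresh instance for each create() call so
    // every token stream gets its own stemmer and threads never share state.
    String stemWithPerCallInstance(String token) {
        return new Stemmer().stem(token);
    }
}
```

The per-call instance costs one extra allocation per token stream, but it removes the shared mutable state that produced the intermittent failures reported in #21911.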
{ "body": "<!--\r\nGitHub is reserved for bug reports and feature requests. The best place\r\nto ask a general question is at the Elastic Discourse forums at\r\nhttps://discuss.elastic.co. If you are in fact posting a bug report or\r\na feature request, please include one and only one of the below blocks\r\nin your new issue. Note that whether you're filing a bug report or a\r\nfeature request, ensure that your submission is for an\r\n[OS that we support](https://www.elastic.co/support/matrix#show_os).\r\nBug reports on an OS that we do not support or feature requests\r\nspecific to an OS that we do not support will be closed.\r\n-->\r\n\r\n<!--\r\nIf you are filing a bug report, please remove the below feature\r\nrequest block and provide responses for all of the below items.\r\n-->\r\n\r\n**Elasticsearch version**: 5.0\r\n\r\n**Plugins installed**: default set\r\n\r\n**JVM version**: openjdk version \"1.8.0_101\"\r\n\r\n**OS version**: Centos 6\r\n\r\n**Description of the problem including expected versus actual behavior**:\r\n\r\nI have trying to append a field value to a string but I am getting an ArrayIndexOutOfBoundsException.\r\nIt seems that there is a problem with the `.getValue()` function for IP data type fields.\r\n\r\nIf anyone has a workaround I would much appreciate it.\r\n\r\nThis code does not work:\r\n```\r\ndef ip = doc['IP_SRC'].getValue();\r\n```\r\n\r\nthis code does not work:\r\n```\r\ndef acc = 'my_ip:';\r\nacc += doc['IP_SRC'];\r\n```\r\n\r\n**Steps to reproduce**:\r\n 1. Create an field with mapping type ip\r\n 2. Add documents some with doc values containing ip field data\r\n 3. Try use that field in a string in a Painless script (I am using my script as a Kibana scripted field)\r\n\r\n**Provide logs (if relevant)**:\r\n\r\nThis log doesn't seem to show the cause of the Out of Bounds exception but I believe its somewhere in the to UTF8 routines of `ByteRef`.\r\n\r\n```\r\n[2017-01-12T12:37:49,043][DEBUG][o.e.a.s.TransportSearchAction] [Z8qG7Y5] [nap-gi-2017.01.12][0], node[Z8qG7Y5IScyM2r06BN0Dbg], [P], s[STARTED], a[id=TpRuYNq6TE-cY47uz3VlsA]: Failed to execute [SearchRequest{searchType=QUERY_AND_FETCH, indices=[nap-gi-2017.01.12], indicesOptions=IndicesOptions[id=39, ignore_unavailable=true, allow_no_indices=true, expand_wildcards_open=true, expand_wildcards_closed=false, allow_alisases_to_multiple_indices=true, forbid_closed_indices=true], types=[], routing='null', preference='1484222965683', requestCache=null, scroll=null, source={\r\n \"size\" : 500,\r\n \"query\" : {\r\n \"bool\" : {\r\n \"must\" : [\r\n {\r\n \"query_string\" : {\r\n \"query\" : \"*\",\r\n \"fields\" : [ ],\r\n \"use_dis_max\" : true,\r\n \"tie_breaker\" : 0.0,\r\n \"default_operator\" : \"or\",\r\n \"auto_generate_phrase_queries\" : false,\r\n \"max_determined_states\" : 10000,\r\n \"lowercase_expanded_terms\" : true,\r\n \"enable_position_increment\" : true,\r\n \"fuzziness\" : \"AUTO\",\r\n \"fuzzy_prefix_length\" : 0,\r\n \"fuzzy_max_expansions\" : 50,\r\n \"phrase_slop\" : 0,\r\n \"analyze_wildcard\" : true,\r\n \"locale\" : \"und\",\r\n \"escape\" : false,\r\n \"boost\" : 1.0\r\n }\r\n },\r\n {\r\n \"range\" : {\r\n \"TS\" : {\r\n \"from\" : 1484222823984,\r\n \"to\" : 1484224623984,\r\n \"include_lower\" : true,\r\n \"include_upper\" : true,\r\n \"format\" : \"epoch_millis\",\r\n \"boost\" : 1.0\r\n }\r\n }\r\n }\r\n ],\r\n \"disable_coord\" : false,\r\n \"adjust_pure_negative\" : true,\r\n \"boost\" : 1.0\r\n }\r\n },\r\n \"_source\" : {\r\n \"includes\" : [ ],\r\n \"excludes\" : [ 
]\r\n },\r\n \"stored_fields\" : \"*\",\r\n \"docvalue_fields\" : [\r\n \"TS\"\r\n ],\r\n \"script_fields\" : {\r\n \"search_token\" : {\r\n \"script\" : {\r\n \"inline\" : \"def json = '{';if (doc['IP_SRC'].size() > 0){\\n def asBytes = doc['IP_SRC'].getValue();\\n json += \\\"\\\\\\\"s\\\\\\\": \\\" + \\\",\\\";\\n}\\njson += '}';\\nreturn json\",\r\n \"lang\" : \"painless\"\r\n },\r\n \"ignore_failure\" : false\r\n }\r\n },\r\n \"sort\" : [\r\n {\r\n \"TS\" : {\r\n \"order\" : \"desc\",\r\n \"unmapped_type\" : \"boolean\"\r\n }\r\n }\r\n ],\r\n \"aggregations\" : {\r\n \"2\" : {\r\n \"date_histogram\" : {\r\n \"field\" : \"TS\",\r\n \"time_zone\" : \"Europe/London\",\r\n \"interval\" : \"30s\",\r\n \"offset\" : 0,\r\n \"order\" : {\r\n \"_key\" : \"asc\"\r\n },\r\n \"keyed\" : false,\r\n \"min_doc_count\" : 1\r\n }\r\n }\r\n },\r\n \"highlight\" : {\r\n \"pre_tags\" : [\r\n \"@kibana-highlighted-field@\"\r\n ],\r\n \"post_tags\" : [\r\n \"@/kibana-highlighted-field@\"\r\n ],\r\n \"fragment_size\" : 2147483647,\r\n \"require_field_match\" : false,\r\n \"fields\" : {\r\n \"*\" : { }\r\n }\r\n },\r\n \"ext\" : { }\r\n}}]\r\norg.elasticsearch.transport.RemoteTransportException: [Z8qG7Y5][127.0.0.1:9300][indices:data/read/search[phase/query+fetch]]\r\nCaused by: org.elasticsearch.script.ScriptException: runtime error\r\n at org.elasticsearch.painless.ScriptImpl.convertToScriptException(ScriptImpl.java:177) ~[?:?]\r\n at org.elasticsearch.painless.ScriptImpl.run(ScriptImpl.java:124) ~[?:?]\r\n at org.elasticsearch.search.fetch.subphase.ScriptFieldsFetchSubPhase.hitExecute(ScriptFieldsFetchSubPhase.java:52) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.search.fetch.FetchPhase.execute(FetchPhase.java:161) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.search.SearchService.executeFetchPhase(SearchService.java:358) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.action.search.SearchTransportService.lambda$registerRequestHandler$9(SearchTransportService.java:291) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.TransportRequestHandler.messageReceived(TransportRequestHandler.java:33) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:69) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:548) [elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:504) [elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-5.0.0.jar:5.0.0]\r\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_101]\r\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_101]\r\n at java.lang.Thread.run(Thread.java:745) [?:1.8.0_101]\r\nCaused by: java.lang.ArrayIndexOutOfBoundsException\r\n[2017-01-12T12:37:49,045][DEBUG][o.e.a.s.TransportSearchAction] [Z8qG7Y5] All shards failed for phase: [query_fetch]\r\norg.elasticsearch.transport.RemoteTransportException: [Z8qG7Y5][127.0.0.1:9300][indices:data/read/search[phase/query+fetch]]\r\nCaused by: org.elasticsearch.script.ScriptException: runtime error\r\n at org.elasticsearch.painless.ScriptImpl.convertToScriptException(ScriptImpl.java:177) ~[?:?]\r\n at org.elasticsearch.painless.ScriptImpl.run(ScriptImpl.java:124) 
~[?:?]\r\n at org.elasticsearch.search.fetch.subphase.ScriptFieldsFetchSubPhase.hitExecute(ScriptFieldsFetchSubPhase.java:52) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.search.fetch.FetchPhase.execute(FetchPhase.java:161) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.search.SearchService.executeFetchPhase(SearchService.java:358) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.action.search.SearchTransportService.lambda$registerRequestHandler$9(SearchTransportService.java:291) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.TransportRequestHandler.messageReceived(TransportRequestHandler.java:33) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:69) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:548) [elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:504) [elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-5.0.0.jar:5.0.0]\r\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_101]\r\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_101]\r\n at java.lang.Thread.run(Thread.java:745) [?:1.8.0_101]\r\nCaused by: java.lang.ArrayIndexOutOfBoundsException\r\n```", "comments": [ { "body": "That stacktrace looks cut off. If that is how it is logged then we have another problem in addition to whatever you've found with ips. Also look out for #22393.", "created_at": "2017-01-12T14:42:23Z" }, { "body": "That is indeed how it is logged :) if I mess around with the script I abitni can sometimes get a longer stack trace which shows the source of the out of bounds in the UTF8 code", "created_at": "2017-01-12T15:26:55Z" }, { "body": "Funny you should mention #22393 as I had run into the string escape issue and I have seen that ticket. It does not seem to solve this issue", "created_at": "2017-01-12T15:28:14Z" }, { "body": "> That is indeed how it is logged :)\r\n\r\nGreat! We play some tricks with those stacktraces in an effort to make them a bit more useable but it looks like we've gone a bit too far.\r\n\r\n\r\n> Funny you should mention #22393 as I had run into the string escape issue and I have seen that ticket. It does not seem to solve this issue\r\n\r\nYeah. I just meant that you would have to work around that issue in addition to any workaround we find for this one.\r\n\r\nI'll start in on this as soon as I can get finished replying to other github issues.", "created_at": "2017-01-12T15:35:42Z" }, { "body": "thank you @nik9000! My hunch is that there needs to be a specific subclass in `ScriptDocValues` for IP fields. 
Here is a longer trace:\r\n\r\n```org.elasticsearch.transport.RemoteTransportException: [Z8qG7Y5][127.0.0.1:9300][indices:data/read/search[phase/query+fetch]]\r\nCaused by: org.elasticsearch.script.ScriptException: runtime error\r\n at org.elasticsearch.painless.ScriptImpl.convertToScriptException(ScriptImpl.java:177) ~[?:?]\r\n at org.elasticsearch.painless.ScriptImpl.run(ScriptImpl.java:124) ~[?:?]\r\n at org.elasticsearch.search.fetch.subphase.ScriptFieldsFetchSubPhase.hitExecute(ScriptFieldsFetchSubPhase.java:52) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.search.fetch.FetchPhase.execute(FetchPhase.java:161) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.search.SearchService.executeFetchPhase(SearchService.java:358) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.action.search.SearchTransportService.lambda$registerRequestHandler$9(SearchTransportService.java:291) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.TransportRequestHandler.messageReceived(TransportRequestHandler.java:33) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:69) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:548) [elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:504) [elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-5.0.0.jar:5.0.0]\r\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_101]\r\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_101]\r\n at java.lang.Thread.run(Thread.java:745) [?:1.8.0_101]\r\nCaused by: java.lang.ArrayIndexOutOfBoundsException: 16\r\n at org.apache.lucene.util.UnicodeUtil.UTF8toUTF16(UnicodeUtil.java:598) ~[lucene-core-6.2.0.jar:6.2.0 764d0f19151dbff6f5fcd9fc4b2682cf934590c5 - mike - 2016-08-20 05:39:36]\r\n at org.apache.lucene.util.BytesRef.utf8ToString(BytesRef.java:152) ~[lucene-core-6.2.0.jar:6.2.0 764d0f19151dbff6f5fcd9fc4b2682cf934590c5 - mike - 2016-08-20 05:39:36]\r\n at org.elasticsearch.index.fielddata.ScriptDocValues$Strings.getValue(ScriptDocValues.java:84) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at java.lang.invoke.MethodHandle.invokeWithArguments(MethodHandle.java:627) ~[?:1.8.0_101]\r\n at org.elasticsearch.painless.DefBootstrap$PIC.fallback(DefBootstrap.java:204) ~[?:?]\r\n at org.elasticsearch.painless.Executable$Script.execute(def json = '{'; ... 
@ <inline script>:332) ~[?:?]\r\n at org.elasticsearch.painless.ScriptImpl.run(ScriptImpl.java:121) ~[?:?]\r\n at org.elasticsearch.search.fetch.subphase.ScriptFieldsFetchSubPhase.hitExecute(ScriptFieldsFetchSubPhase.java:52) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.search.fetch.FetchPhase.execute(FetchPhase.java:161) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.search.SearchService.executeFetchPhase(SearchService.java:358) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.action.search.SearchTransportService.lambda$registerRequestHandler$9(SearchTransportService.java:291) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.TransportRequestHandler.messageReceived(TransportRequestHandler.java:33) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:69) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:548) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:504) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_101]\r\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_101]\r\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_101]\r\n[2017-01-12T12:14:30,913][DEBUG][o.e.a.s.TransportSearchAction] [Z8qG7Y5] All shards failed for phase: [query_fetch]\r\norg.elasticsearch.transport.RemoteTransportException: [Z8qG7Y5][127.0.0.1:9300][indices:data/read/search[phase/query+fetch]]\r\nCaused by: org.elasticsearch.script.ScriptException: runtime error\r\n at org.elasticsearch.painless.ScriptImpl.convertToScriptException(ScriptImpl.java:177) ~[?:?]\r\n at org.elasticsearch.painless.ScriptImpl.run(ScriptImpl.java:124) ~[?:?]\r\n at org.elasticsearch.search.fetch.subphase.ScriptFieldsFetchSubPhase.hitExecute(ScriptFieldsFetchSubPhase.java:52) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.search.fetch.FetchPhase.execute(FetchPhase.java:161) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.search.SearchService.executeFetchPhase(SearchService.java:358) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.action.search.SearchTransportService.lambda$registerRequestHandler$9(SearchTransportService.java:291) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.TransportRequestHandler.messageReceived(TransportRequestHandler.java:33) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:69) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:548) [elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:504) [elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-5.0.0.jar:5.0.0]\r\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_101]\r\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_101]\r\n at 
java.lang.Thread.run(Thread.java:745) [?:1.8.0_101]\r\nCaused by: java.lang.ArrayIndexOutOfBoundsException: 16\r\n at org.apache.lucene.util.UnicodeUtil.UTF8toUTF16(UnicodeUtil.java:598) ~[lucene-core-6.2.0.jar:6.2.0 764d0f19151dbff6f5fcd9fc4b2682cf934590c5 - mike - 2016-08-20 05:39:36]\r\n at org.apache.lucene.util.BytesRef.utf8ToString(BytesRef.java:152) ~[lucene-core-6.2.0.jar:6.2.0 764d0f19151dbff6f5fcd9fc4b2682cf934590c5 - mike - 2016-08-20 05:39:36]\r\n at org.elasticsearch.index.fielddata.ScriptDocValues$Strings.getValue(ScriptDocValues.java:84) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at java.lang.invoke.MethodHandle.invokeWithArguments(MethodHandle.java:627) ~[?:1.8.0_101]\r\n at org.elasticsearch.painless.DefBootstrap$PIC.fallback(DefBootstrap.java:204) ~[?:?]\r\n at org.elasticsearch.painless.Executable$Script.execute(def json = '{'; ... @ <inline script>:332) ~[?:?]\r\n at org.elasticsearch.painless.ScriptImpl.run(ScriptImpl.java:121) ~[?:?]\r\n at org.elasticsearch.search.fetch.subphase.ScriptFieldsFetchSubPhase.hitExecute(ScriptFieldsFetchSubPhase.java:52) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.search.fetch.FetchPhase.execute(FetchPhase.java:161) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.search.SearchService.executeFetchPhase(SearchService.java:358) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.action.search.SearchTransportService.lambda$registerRequestHandler$9(SearchTransportService.java:291) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.TransportRequestHandler.messageReceived(TransportRequestHandler.java:33) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:69) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:548) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:504) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_101]\r\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_101]\r\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_101]\r\n```\r\n\r\nI got that with this script and don't understand really what makes it different :-) :\r\n\r\n```\r\n String ip = doc['IP_SRC'].getValue();\r\n```\r\n\r\nAlso I am not sure if it helps or if this is a different issue but if I do the following script I get a NoSuchElement exception (I thought maybe its because the field is null but even if i wrap it in a if statement i get the same issue):\r\n\r\n```\r\ndef mystring = 'myip:';\r\nmystring += doc['IP_SRC'];\r\n```\r\n\r\n```\r\norg.elasticsearch.transport.RemoteTransportException: [Z8qG7Y5][127.0.0.1:9300][indices:data/read/search[phase/query+fetch]]\r\nCaused by: org.elasticsearch.script.ScriptException: runtime error\r\n at org.elasticsearch.painless.ScriptImpl.convertToScriptException(ScriptImpl.java:177) ~[?:?]\r\n at org.elasticsearch.painless.ScriptImpl.run(ScriptImpl.java:124) ~[?:?]\r\n at org.elasticsearch.search.fetch.subphase.ScriptFieldsFetchSubPhase.hitExecute(ScriptFieldsFetchSubPhase.java:52) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at 
org.elasticsearch.search.fetch.FetchPhase.execute(FetchPhase.java:161) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.search.SearchService.executeFetchPhase(SearchService.java:358) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.action.search.SearchTransportService.lambda$registerRequestHandler$9(SearchTransportService.java:291) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.TransportRequestHandler.messageReceived(TransportRequestHandler.java:33) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:69) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:548) [elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:504) [elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-5.0.0.jar:5.0.0]\r\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_101]\r\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_101]\r\n at java.lang.Thread.run(Thread.java:745) [?:1.8.0_101]\r\nCaused by: java.util.NoSuchElementException\r\n at java.util.AbstractList$Itr.next(AbstractList.java:364) ~[?:1.8.0_101]\r\n at java.util.AbstractCollection.toString(AbstractCollection.java:461) ~[?:1.8.0_101]\r\n at java.lang.String.valueOf(String.java:2994) ~[?:1.8.0_101]\r\n at java.lang.StringBuilder.append(StringBuilder.java:131) ~[?:1.8.0_101]\r\n at org.elasticsearch.painless.Executable$Script.execute(def json = '{'; ... @ <inline script>:325) ~[?:?]\r\n at org.elasticsearch.painless.ScriptImpl.run(ScriptImpl.java:121) ~[?:?]\r\n at org.elasticsearch.search.fetch.subphase.ScriptFieldsFetchSubPhase.hitExecute(ScriptFieldsFetchSubPhase.java:52) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.search.fetch.FetchPhase.execute(FetchPhase.java:161) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.search.SearchService.executeFetchPhase(SearchService.java:358) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.action.search.SearchTransportService.lambda$registerRequestHandler$9(SearchTransportService.java:291) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.TransportRequestHandler.messageReceived(TransportRequestHandler.java:33) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:69) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:548) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:504) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_101]\r\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_101]\r\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_101]\r\n[2017-01-12T15:07:50,668][DEBUG][o.e.a.s.TransportSearchAction] [Z8qG7Y5] All shards failed for phase: 
[query_fetch]\r\norg.elasticsearch.transport.RemoteTransportException: [Z8qG7Y5][127.0.0.1:9300][indices:data/read/search[phase/query+fetch]]\r\nCaused by: org.elasticsearch.script.ScriptException: runtime error\r\n at org.elasticsearch.painless.ScriptImpl.convertToScriptException(ScriptImpl.java:177) ~[?:?]\r\n at org.elasticsearch.painless.ScriptImpl.run(ScriptImpl.java:124) ~[?:?]\r\n at org.elasticsearch.search.fetch.subphase.ScriptFieldsFetchSubPhase.hitExecute(ScriptFieldsFetchSubPhase.java:52) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.search.fetch.FetchPhase.execute(FetchPhase.java:161) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.search.SearchService.executeFetchPhase(SearchService.java:358) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.action.search.SearchTransportService.lambda$registerRequestHandler$9(SearchTransportService.java:291) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.TransportRequestHandler.messageReceived(TransportRequestHandler.java:33) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:69) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:548) [elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:504) [elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-5.0.0.jar:5.0.0]\r\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_101]\r\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_101]\r\n at java.lang.Thread.run(Thread.java:745) [?:1.8.0_101]\r\nCaused by: java.util.NoSuchElementException\r\n at java.util.AbstractList$Itr.next(AbstractList.java:364) ~[?:1.8.0_101]\r\n at java.util.AbstractCollection.toString(AbstractCollection.java:461) ~[?:1.8.0_101]\r\n at java.lang.String.valueOf(String.java:2994) ~[?:1.8.0_101]\r\n at java.lang.StringBuilder.append(StringBuilder.java:131) ~[?:1.8.0_101]\r\n at org.elasticsearch.painless.Executable$Script.execute(def json = '{'; ... 
@ <inline script>:325) ~[?:?]\r\n at org.elasticsearch.painless.ScriptImpl.run(ScriptImpl.java:121) ~[?:?]\r\n at org.elasticsearch.search.fetch.subphase.ScriptFieldsFetchSubPhase.hitExecute(ScriptFieldsFetchSubPhase.java:52) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.search.fetch.FetchPhase.execute(FetchPhase.java:161) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.search.SearchService.executeFetchPhase(SearchService.java:358) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.action.search.SearchTransportService.lambda$registerRequestHandler$9(SearchTransportService.java:291) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.TransportRequestHandler.messageReceived(TransportRequestHandler.java:33) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:69) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:548) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:504) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_101]\r\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_101]\r\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_101]\r\n```", "created_at": "2017-01-12T15:56:54Z" }, { "body": "> My hunch is that there needs to be a specific subclass in ScriptDocValues for IP fields.\r\n\r\nLooks like @jpountz already got it: https://github.com/elastic/elasticsearch/commit/cbd43401300a76e6f4431654f8b32beb94938897\r\n\r\n\r\n*but* there still is an issue in master:\r\n```\r\nUnable to find dynamic method [getValue] with [0] arguments for class [org.elasticsearch.index.mapper.IpFieldMapper.IpFieldType.IpScriptDocValues]\r\n```\r\n\r\nIt looks like we're missing a whitelist entry for painless. Digging....", "created_at": "2017-01-12T16:06:35Z" }, { "body": "Ok - I've almost got it but I can't figure out why painless can't find a class. I'm going to take a break and come back in a bit.", "created_at": "2017-01-12T16:50:04Z" }, { "body": "@nik9000 I'm not seeing this in the whitelist in master.", "created_at": "2017-01-12T17:34:22Z" }, { "body": "> @nik9000 I'm not seeing this in the whitelist in master.\r\n\r\nYeah, I'm whitelisting it now and making a test for them all. But I'm having trouble with class not found exceptions.", "created_at": "2017-01-12T19:12:38Z" }, { "body": "Ahh, sorry I misunderstood.", "created_at": "2017-01-12T19:13:15Z" }, { "body": "Thank you @nik9000", "created_at": "2017-01-12T21:11:52Z" }, { "body": "> Thank you @nik9000\r\n\r\nYou are quite welcome! You'll get it in 5.2.0 which isn't quite ready yet, but is coming soonish.", "created_at": "2017-01-12T21:13:24Z" } ], "number": 22584, "title": "Unable to cast IP DataType to String in Painless Script" }
{ "body": "Without this whitelist painless can't use ip or binary doc values.\r\n\r\nCloses #22584\r\n", "number": 22600, "review_comments": [], "title": "Whitelist some ScriptDocValues in painless" }
{ "commits": [ { "message": "Whitelist some ScriptDocValues in painless\n\nWithout this whitelist painless can't use ip or binary doc values.\n\nCloses #22584" } ], "files": [ { "diff": "@@ -19,7 +19,6 @@\n \n package org.elasticsearch.index.mapper;\n \n-import org.apache.lucene.document.Field;\n import org.apache.lucene.document.InetAddressPoint;\n import org.apache.lucene.document.SortedSetDocValuesField;\n import org.apache.lucene.document.StoredField;\n@@ -32,7 +31,6 @@\n import org.apache.lucene.search.MatchNoDocsQuery;\n import org.apache.lucene.search.Query;\n import org.apache.lucene.util.BytesRef;\n-import org.elasticsearch.Version;\n import org.elasticsearch.action.fieldstats.FieldStats;\n import org.elasticsearch.common.Explicit;\n import org.elasticsearch.common.Nullable;\n@@ -41,7 +39,6 @@\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.index.fielddata.IndexFieldData;\n import org.elasticsearch.index.fielddata.ScriptDocValues;\n-import org.elasticsearch.index.fielddata.SortedBinaryDocValues;\n import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;\n import org.elasticsearch.index.query.QueryShardContext;\n import org.elasticsearch.search.DocValueFormat;\n@@ -51,11 +48,9 @@\n import java.net.InetAddress;\n import java.util.AbstractList;\n import java.util.Arrays;\n-import java.util.Collection;\n import java.util.Collections;\n import java.util.Iterator;\n import java.util.List;\n-import java.util.ListIterator;\n import java.util.Map;\n \n /** A {@link FieldMapper} for ip addresses. */\n@@ -238,7 +233,7 @@ public FieldStats.Ip stats(IndexReader reader) throws IOException {\n InetAddressPoint.decode(min), InetAddressPoint.decode(max));\n }\n \n- private static class IpScriptDocValues extends AbstractList<String> implements ScriptDocValues<String> {\n+ public static final class IpScriptDocValues extends AbstractList<String> implements ScriptDocValues<String> {\n \n private final RandomAccessOrds values;\n ", "filename": "core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java", "status": "modified" }, { "diff": "@@ -566,11 +566,11 @@ private Map<String,List<String>> addStructs() {\n }\n if (line.startsWith(\"class \")) {\n String elements[] = line.split(\"\\u0020\");\n- assert elements[2].equals(\"->\");\n+ assert elements[2].equals(\"->\") : \"Invalid struct definition [\" + String.join(\" \", elements) +\"]\";\n if (elements.length == 7) {\n hierarchy.put(elements[1], Arrays.asList(elements[5].split(\",\")));\n } else {\n- assert elements.length == 5;\n+ assert elements.length == 5 : \"Invalid struct definition [\" + String.join(\" \", elements) + \"]\";\n }\n String className = elements[1];\n String javaPeer = elements[3];\n@@ -612,7 +612,7 @@ private Map<String,List<String>> addStructs() {\n }\n }\n } catch (Exception e) {\n- throw new RuntimeException(\"syntax error in \" + file + \", line: \" + currentLine, e);\n+ throw new RuntimeException(\"error in \" + file + \", line: \" + currentLine, e);\n }\n }\n return hierarchy;", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java", "status": "modified" }, { "diff": "@@ -114,6 +114,26 @@ class org.elasticsearch.index.fielddata.ScriptDocValues.Booleans -> org.elastics\n List getValues()\n }\n \n+class org.elasticsearch.index.fielddata.ScriptDocValues.BytesRefs -> org.elasticsearch.index.fielddata.ScriptDocValues$BytesRefs extends List,Collection,Iterable,Object {\n+ BytesRef get(int)\n+ BytesRef getValue()\n+ List 
getValues()\n+}\n+\n+class BytesRef -> org.apache.lucene.util.BytesRef extends Object {\n+ byte[] bytes\n+ int offset\n+ int length\n+ boolean bytesEquals(BytesRef)\n+ String utf8ToString()\n+}\n+\n+class org.elasticsearch.index.mapper.IpFieldMapper.IpFieldType.IpScriptDocValues -> org.elasticsearch.index.mapper.IpFieldMapper$IpFieldType$IpScriptDocValues extends List,Collection,Iterable,Object {\n+ String get(int)\n+ String getValue()\n+ List getValues()\n+}\n+\n # for testing.\n # currently FeatureTest exposes overloaded constructor, field load store, and overloaded static methods\n class org.elasticsearch.painless.FeatureTest -> org.elasticsearch.painless.FeatureTest extends Object {", "filename": "modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt", "status": "modified" }, { "diff": "@@ -0,0 +1,368 @@\n+setup:\n+ - do:\n+ indices.create:\n+ index: test\n+ body:\n+ mappings:\n+ test:\n+ properties:\n+ binary:\n+ type: binary\n+ doc_values: true\n+ boolean:\n+ type: boolean\n+ date:\n+ type: date\n+ geo_point:\n+ type: geo_point\n+ ip:\n+ type: ip\n+ keyword:\n+ type: keyword\n+ long:\n+ type: long\n+ integer:\n+ type: integer\n+ short:\n+ type: short\n+ byte:\n+ type: byte\n+ double:\n+ type: double\n+ float:\n+ type: float\n+ half_float:\n+ type: half_float\n+ scaled_float:\n+ type: scaled_float\n+ scaling_factor: 100\n+ token_count:\n+ type: token_count\n+ analyzer: standard\n+\n+ - do:\n+ index:\n+ index: test\n+ type: test\n+ id: 1\n+ body:\n+ binary: U29tZSBiaW5hcnkgYmxvYg==\n+ boolean: true\n+ date: 2017-01-01T12:11:12\n+ geo_point: 41.12,-71.34\n+ ip: 192.168.0.1\n+ keyword: not split at all\n+ long: 12348732141234\n+ integer: 134134566\n+ short: 1324\n+ byte: 12\n+ double: 3.14159265358979\n+ float: 3.141592654\n+ half_float: 3.140625\n+ scaled_float: 3.14\n+ token_count: count all these words please\n+\n+ - do:\n+ indices.refresh: {}\n+\n+---\n+\"binary\":\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['binary'].get(0).utf8ToString()\"\n+ - match: { hits.hits.0.fields.field.0: \"Some binary blob\" }\n+\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['binary'].value.utf8ToString()\"\n+ - match: { hits.hits.0.fields.field.0: \"Some binary blob\" }\n+\n+---\n+\"boolean\":\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['boolean'].get(0)\"\n+ - match: { hits.hits.0.fields.field.0: true }\n+\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['boolean'].value\"\n+ - match: { hits.hits.0.fields.field.0: true }\n+\n+---\n+\"date\":\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['date'].get(0)\"\n+ - match: { hits.hits.0.fields.field.0: 1483272672000 }\n+\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['date'].value\"\n+ - match: { hits.hits.0.fields.field.0: 1483272672000 }\n+\n+---\n+\"geo_point\":\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['geo_point'].get(0)\"\n+ - match: { hits.hits.0.fields.field.0.lat: 41.1199999647215 }\n+ - match: { hits.hits.0.fields.field.0.lon: -71.34000004269183 }\n+\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['geo_point'].value\"\n+ - match: { hits.hits.0.fields.field.0.lat: 41.1199999647215 }\n+ - match: { hits.hits.0.fields.field.0.lon: -71.34000004269183 }\n+\n+---\n+\"ip\":\n+ - do:\n+ search:\n+ body:\n+ 
script_fields:\n+ field:\n+ script:\n+ inline: \"doc['ip'].get(0)\"\n+ - match: { hits.hits.0.fields.field.0: \"192.168.0.1\" }\n+\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['ip'].value\"\n+ - match: { hits.hits.0.fields.field.0: \"192.168.0.1\" }\n+\n+---\n+\"keyword\":\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['keyword'].get(0)\"\n+ - match: { hits.hits.0.fields.field.0: \"not split at all\" }\n+\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['keyword'].value\"\n+ - match: { hits.hits.0.fields.field.0: \"not split at all\" }\n+\n+---\n+\"long\":\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['long'].get(0)\"\n+ - match: { hits.hits.0.fields.field.0: 12348732141234 }\n+\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['long'].value\"\n+ - match: { hits.hits.0.fields.field.0: 12348732141234 }\n+\n+---\n+\"integer\":\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['integer'].get(0)\"\n+ - match: { hits.hits.0.fields.field.0: 134134566 }\n+\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['integer'].value\"\n+ - match: { hits.hits.0.fields.field.0: 134134566 }\n+\n+---\n+\"short\":\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['short'].get(0)\"\n+ - match: { hits.hits.0.fields.field.0: 1324 }\n+\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['short'].value\"\n+ - match: { hits.hits.0.fields.field.0: 1324 }\n+\n+---\n+\"byte\":\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['byte'].get(0)\"\n+ - match: { hits.hits.0.fields.field.0: 12 }\n+\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['byte'].value\"\n+ - match: { hits.hits.0.fields.field.0: 12 }\n+\n+---\n+\"double\":\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['double'].get(0)\"\n+ - match: { hits.hits.0.fields.field.0: 3.14159265358979 }\n+\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['double'].value\"\n+ - match: { hits.hits.0.fields.field.0: 3.14159265358979 }\n+\n+---\n+\"float\":\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['float'].get(0)\"\n+ - match: { hits.hits.0.fields.field.0: 3.1415927410125732 } # this ends up as a double\n+\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['float'].value\"\n+ - match: { hits.hits.0.fields.field.0: 3.1415927410125732 } # this ends up as a double\n+\n+---\n+\"half_float\":\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['half_float'].get(0)\"\n+ - match: { hits.hits.0.fields.field.0: 3.140625 }\n+\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['half_float'].value\"\n+ - match: { hits.hits.0.fields.field.0: 3.140625 }\n+\n+---\n+\"scaled_float\":\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['scaled_float'].get(0)\"\n+ - match: { hits.hits.0.fields.field.0: 3.14 }\n+\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['scaled_float'].value\"\n+ - match: { hits.hits.0.fields.field.0: 3.14 }\n+\n+---\n+\"token_count\":\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ 
script:\n+ inline: \"doc['token_count'].get(0)\"\n+ - match: { hits.hits.0.fields.field.0: 5 }\n+\n+ - do:\n+ search:\n+ body:\n+ script_fields:\n+ field:\n+ script:\n+ inline: \"doc['token_count'].value\"\n+ - match: { hits.hits.0.fields.field.0: 5 }", "filename": "modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yaml", "status": "added" } ] }
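As a standalone illustration of why the generic `Strings` doc-values path fails on `ip` fields, and what an ip-aware doc-values class has to do instead, here is a hedged sketch. It assumes Lucene's `InetAddressPoint` (imported by the mapper above) is available on the classpath; it is not the mapper's actual code path.

```java
import java.net.InetAddress;

import org.apache.lucene.document.InetAddressPoint;
import org.apache.lucene.util.BytesRef;

public class IpDocValueSketch {
    public static void main(String[] args) throws Exception {
        InetAddress ip = InetAddress.getByName("192.168.0.1");

        // Roughly what an ip field stores as a doc value: a fixed-width binary encoding,
        // not UTF-8 text.
        byte[] encoded = InetAddressPoint.encode(ip);
        BytesRef docValue = new BytesRef(encoded);

        // Reading those bytes as UTF-8 (the ScriptDocValues.Strings path seen in the
        // stack traces above) is what fails; left commented out here:
        // String broken = docValue.utf8ToString();

        // Decoding them back into an address is what an ip-aware doc values class does
        // before rendering the value as text.
        InetAddress decoded = InetAddressPoint.decode(docValue.bytes);
        System.out.println(decoded.getHostAddress());
    }
}
```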
{ "body": "**Elasticsearch version**: `5.1`\r\n\r\n**Plugins installed**: none\r\n\r\n**JVM version**: \r\n```\r\nJava(TM) SE Runtime Environment (build 1.8.0_111-b14)\r\nJava HotSpot(TM) 64-Bit Server VM (build 25.111-b14, mixed mode)\r\n```\r\n**OS version**: `Ubuntu 14.04.1 LTS`\r\n\r\n**Description of the problem including expected versus actual behavior**:\r\n\r\nI don't know if this is a bug or a feature, but in my opinion it shouldn't work like that.\r\n\r\nWhen filtering source (`_source`), there are returned fields that contain only part of the string.\r\n\r\nLet's say we have source object looking like this: \r\n```\r\n{\r\n \"photos\": [\"a.jpg\", \"b.jpg\"],\r\n \"photosCount\": 2\r\n}\r\n```\r\nWhen filtering like this: `\"_source\": [\"photosCount\"]`, `photos` field is also returned.\r\n(same for `\"_source\": \"photosCount\"` and `\"_source\": {\"include\": [\"photosCount\"]}`)\r\nNow I have to explicitly exclude `photos`, for not having it in response.\r\n\r\n**Steps to reproduce**:\r\n\r\n 1.\r\n```\r\ncurl -XPOST 'localhost:9200/some_thing/some_type' -d '\r\n{\r\n \"photos\": [\"a.jpg\", \"b.jpg\"],\r\n \"photosCount\": 2\r\n}\r\n```\r\n 2.\r\n```\r\ncurl -XPOST 'localhost:9200/some_thing/some_type/_search?pretty' -d '\r\n{\r\n \"_source\": \"photosCount\",\r\n \"query\": {}\r\n}\r\n```", "comments": [ { "body": "May be related to https://github.com/elastic/elasticsearch/pull/20736", "created_at": "2017-01-11T15:23:25Z" }, { "body": "@mikemccand Could you take a look at this please? I think the automaton change in #20736 has a bug", "created_at": "2017-01-11T15:24:37Z" }, { "body": "I just confirmed in master with this:\r\n```\r\nPOST /some_thing/some_type\r\n{\r\n \"photos\": [\"a.jpg\", \"b.jpg\"],\r\n \"photosCount\": 2\r\n}\r\n\r\nPOST /some_thing/some_type/_search?pretty\r\n{\r\n \"_source\": \"photosCount\"\r\n}\r\n```", "created_at": "2017-01-11T15:27:06Z" }, { "body": "I'll have a look @clintongormley ", "created_at": "2017-01-12T15:06:56Z" } ], "number": 22557, "title": "source filtering (_source) field matching too loose" }
{ "body": "Source filtering was incorrectly always accepting array items even if the include pattern did not match.\r\n\r\nCloses #22557\r\n", "number": 22593, "review_comments": [ { "body": "`assertThat((List) filteredMap.get(\"array\"), hasSize(1))` has better error messages.", "created_at": "2017-01-12T16:45:57Z" }, { "body": "But you are just fixing a typo so you can skip it.", "created_at": "2017-01-12T16:46:34Z" }, { "body": "Oh thank you for that pointer; I didn't know about `hasSize` ... I'll change it", "created_at": "2017-01-12T16:51:19Z" } ], "title": "Source filtering: only accept array items if the previous include pattern matches" }
{ "commits": [ { "message": "Source filtering: only accept array items if the previous include pattern matches\n\nSource filtering was always accepting array items even if the include pattern did not match.\n\nCloses #22557" }, { "message": "Use Matchers.hasSize() to assert size of lists" } ], "files": [ { "diff": "@@ -263,7 +263,7 @@ private static Map<String, Object> filter(Map<String, ?> map,\n \n List<Object> filteredValue = filter((Iterable<?>) value,\n subIncludeAutomaton, subIncludeState, excludeAutomaton, excludeState, matchAllAutomaton);\n- if (includeAutomaton.isAccept(includeState) || filteredValue.isEmpty() == false) {\n+ if (filteredValue.isEmpty() == false) {\n filtered.put(key, filteredValue);\n }\n \n@@ -286,6 +286,7 @@ private static List<Object> filter(Iterable<?> iterable,\n CharacterRunAutomaton excludeAutomaton, int initialExcludeState,\n CharacterRunAutomaton matchAllAutomaton) {\n List<Object> filtered = new ArrayList<>();\n+ boolean isInclude = includeAutomaton.isAccept(initialIncludeState);\n for (Object value : iterable) {\n if (value instanceof Map) {\n int includeState = includeAutomaton.step(initialIncludeState, '.');\n@@ -304,9 +305,8 @@ private static List<Object> filter(Iterable<?> iterable,\n if (filteredValue.isEmpty() == false) {\n filtered.add(filteredValue);\n }\n- } else {\n- // TODO: we have tests relying on this behavior on arrays even\n- // if the path does not match, but this looks like a bug?\n+ } else if (isInclude) {\n+ // #22557: only accept this array value if the key we are on is accepted:\n filtered.add(value);\n }\n }", "filename": "core/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java", "status": "modified" }, { "diff": "@@ -41,6 +41,7 @@\n import static java.util.Collections.singletonMap;\n import static org.hamcrest.Matchers.hasEntry;\n import static org.hamcrest.Matchers.hasKey;\n+import static org.hamcrest.Matchers.hasSize;\n import static org.hamcrest.Matchers.instanceOf;\n import static org.hamcrest.Matchers.nullValue;\n import static org.hamcrest.core.IsEqual.equalTo;\n@@ -148,8 +149,8 @@ public void testExtractValue() throws Exception {\n extValue = XContentMapValues.extractValue(\"path1.test\", map);\n assertThat(extValue, instanceOf(List.class));\n \n- List extListValue = (List) extValue;\n- assertThat(extListValue.size(), equalTo(2));\n+ List<?> extListValue = (List) extValue;\n+ assertThat(extListValue, hasSize(2));\n \n builder = XContentFactory.jsonBuilder().startObject()\n .startObject(\"path1\")\n@@ -168,7 +169,7 @@ public void testExtractValue() throws Exception {\n assertThat(extValue, instanceOf(List.class));\n \n extListValue = (List) extValue;\n- assertThat(extListValue.size(), equalTo(2));\n+ assertThat(extListValue, hasSize(2));\n assertThat(extListValue.get(0).toString(), equalTo(\"value1\"));\n assertThat(extListValue.get(1).toString(), equalTo(\"value2\"));\n \n@@ -234,9 +235,9 @@ public void testPrefixedNamesFilteringTest() {\n Map<String, Object> map = new HashMap<>();\n map.put(\"obj\", \"value\");\n map.put(\"obj_name\", \"value_name\");\n- Map<String, Object> filterdMap = XContentMapValues.filter(map, new String[]{\"obj_name\"}, Strings.EMPTY_ARRAY);\n- assertThat(filterdMap.size(), equalTo(1));\n- assertThat((String) filterdMap.get(\"obj_name\"), equalTo(\"value_name\"));\n+ Map<String, Object> filteredMap = XContentMapValues.filter(map, new String[]{\"obj_name\"}, Strings.EMPTY_ARRAY);\n+ assertThat(filteredMap.size(), equalTo(1));\n+ assertThat((String) 
filteredMap.get(\"obj_name\"), equalTo(\"value_name\"));\n }\n \n \n@@ -251,19 +252,17 @@ public void testNestedFiltering() {\n put(\"nested\", 2);\n put(\"nested_2\", 3);\n }}));\n- Map<String, Object> falteredMap = XContentMapValues.filter(map, new String[]{\"array.nested\"}, Strings.EMPTY_ARRAY);\n- assertThat(falteredMap.size(), equalTo(1));\n+ Map<String, Object> filteredMap = XContentMapValues.filter(map, new String[]{\"array.nested\"}, Strings.EMPTY_ARRAY);\n+ assertThat(filteredMap.size(), equalTo(1));\n \n- // Selecting members of objects within arrays (ex. [ 1, { nested: \"value\"} ]) always returns all values in the array (1 in the ex)\n- // this is expected behavior as this types of objects are not supported in ES\n- assertThat((Integer) ((List) falteredMap.get(\"array\")).get(0), equalTo(1));\n- assertThat(((Map<String, Object>) ((List) falteredMap.get(\"array\")).get(1)).size(), equalTo(1));\n- assertThat((Integer) ((Map<String, Object>) ((List) falteredMap.get(\"array\")).get(1)).get(\"nested\"), equalTo(2));\n+ assertThat(((List<?>) filteredMap.get(\"array\")), hasSize(1));\n+ assertThat(((Map<String, Object>) ((List) filteredMap.get(\"array\")).get(0)).size(), equalTo(1));\n+ assertThat((Integer) ((Map<String, Object>) ((List) filteredMap.get(\"array\")).get(0)).get(\"nested\"), equalTo(2));\n \n- falteredMap = XContentMapValues.filter(map, new String[]{\"array.*\"}, Strings.EMPTY_ARRAY);\n- assertThat(falteredMap.size(), equalTo(1));\n- assertThat((Integer) ((List) falteredMap.get(\"array\")).get(0), equalTo(1));\n- assertThat(((Map<String, Object>) ((List) falteredMap.get(\"array\")).get(1)).size(), equalTo(2));\n+ filteredMap = XContentMapValues.filter(map, new String[]{\"array.*\"}, Strings.EMPTY_ARRAY);\n+ assertThat(filteredMap.size(), equalTo(1));\n+ assertThat(((List<?>) filteredMap.get(\"array\")), hasSize(1));\n+ assertThat(((Map<String, Object>) ((List) filteredMap.get(\"array\")).get(0)).size(), equalTo(2));\n \n map.clear();\n map.put(\"field\", \"value\");\n@@ -272,16 +271,16 @@ public void testNestedFiltering() {\n put(\"field\", \"value\");\n put(\"field2\", \"value2\");\n }});\n- falteredMap = XContentMapValues.filter(map, new String[]{\"obj.field\"}, Strings.EMPTY_ARRAY);\n- assertThat(falteredMap.size(), equalTo(1));\n- assertThat(((Map<String, Object>) falteredMap.get(\"obj\")).size(), equalTo(1));\n- assertThat((String) ((Map<String, Object>) falteredMap.get(\"obj\")).get(\"field\"), equalTo(\"value\"));\n+ filteredMap = XContentMapValues.filter(map, new String[]{\"obj.field\"}, Strings.EMPTY_ARRAY);\n+ assertThat(filteredMap.size(), equalTo(1));\n+ assertThat(((Map<String, Object>) filteredMap.get(\"obj\")).size(), equalTo(1));\n+ assertThat((String) ((Map<String, Object>) filteredMap.get(\"obj\")).get(\"field\"), equalTo(\"value\"));\n \n- falteredMap = XContentMapValues.filter(map, new String[]{\"obj.*\"}, Strings.EMPTY_ARRAY);\n- assertThat(falteredMap.size(), equalTo(1));\n- assertThat(((Map<String, Object>) falteredMap.get(\"obj\")).size(), equalTo(2));\n- assertThat((String) ((Map<String, Object>) falteredMap.get(\"obj\")).get(\"field\"), equalTo(\"value\"));\n- assertThat((String) ((Map<String, Object>) falteredMap.get(\"obj\")).get(\"field2\"), equalTo(\"value2\"));\n+ filteredMap = XContentMapValues.filter(map, new String[]{\"obj.*\"}, Strings.EMPTY_ARRAY);\n+ assertThat(filteredMap.size(), equalTo(1));\n+ assertThat(((Map<String, Object>) filteredMap.get(\"obj\")).size(), equalTo(2));\n+ assertThat((String) ((Map<String, Object>) 
filteredMap.get(\"obj\")).get(\"field\"), equalTo(\"value\"));\n+ assertThat((String) ((Map<String, Object>) filteredMap.get(\"obj\")).get(\"field2\"), equalTo(\"value2\"));\n \n }\n \n@@ -323,7 +322,7 @@ public void testCompleteObjectFiltering() {\n \n filteredMap = XContentMapValues.filter(map, new String[]{\"array\"}, new String[]{\"*.field2\"});\n assertThat(filteredMap.size(), equalTo(1));\n- assertThat(((List) filteredMap.get(\"array\")).size(), equalTo(2));\n+ assertThat(((List<?>) filteredMap.get(\"array\")), hasSize(2));\n assertThat((Integer) ((List) filteredMap.get(\"array\")).get(0), equalTo(1));\n assertThat(((Map<String, Object>) ((List) filteredMap.get(\"array\")).get(1)).size(), equalTo(1));\n assertThat(((Map<String, Object>) ((List) filteredMap.get(\"array\")).get(1)).get(\"field\").toString(), equalTo(\"value\"));\n@@ -436,20 +435,20 @@ public void testNotOmittingObjectWithNestedExcludedObject() throws Exception {\n \n assertThat(filteredSource.size(), equalTo(1));\n assertThat(filteredSource, hasKey(\"obj1\"));\n- assertThat(((Map) filteredSource.get(\"obj1\")).size(), Matchers.equalTo(0));\n+ assertThat(((Map) filteredSource.get(\"obj1\")).size(), equalTo(0));\n \n // explicit include\n filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{\"obj1\"}, new String[]{\"*.obj2\"});\n assertThat(filteredSource.size(), equalTo(1));\n assertThat(filteredSource, hasKey(\"obj1\"));\n- assertThat(((Map) filteredSource.get(\"obj1\")).size(), Matchers.equalTo(0));\n+ assertThat(((Map) filteredSource.get(\"obj1\")).size(), equalTo(0));\n \n // wild card include\n filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{\"*.obj2\"}, new String[]{\"*.obj3\"});\n assertThat(filteredSource.size(), equalTo(1));\n assertThat(filteredSource, hasKey(\"obj1\"));\n assertThat(((Map<String, Object>) filteredSource.get(\"obj1\")), hasKey(\"obj2\"));\n- assertThat(((Map) ((Map) filteredSource.get(\"obj1\")).get(\"obj2\")).size(), Matchers.equalTo(0));\n+ assertThat(((Map) ((Map) filteredSource.get(\"obj1\")).get(\"obj2\")).size(), equalTo(0));\n }\n \n @SuppressWarnings({\"unchecked\"})\n@@ -589,4 +588,15 @@ public void testSharedPrefixes() {\n assertEquals(Collections.singletonMap(\"foobar\", 2), XContentMapValues.filter(map, new String[] {\"foobar\"}, new String[0]));\n assertEquals(Collections.singletonMap(\"foobaz\", 3), XContentMapValues.filter(map, new String[0], new String[] {\"foobar\"}));\n }\n+\n+ public void testPrefix() {\n+ Map<String, Object> map = new HashMap<>();\n+ map.put(\"photos\", Arrays.asList(new String[] {\"foo\", \"bar\"}));\n+ map.put(\"photosCount\", 2);\n+\n+ Map<String, Object> filtered = XContentMapValues.filter(map, new String[] {\"photosCount\"}, new String[0]);\n+ Map<String, Object> expected = new HashMap<>();\n+ expected.put(\"photosCount\", 2);\n+ assertEquals(expected, filtered);\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java", "status": "modified" } ] }
{ "body": "\r\n**Elasticsearch version**:\r\nReproducible on:\r\n - 5.1.1\r\n - 2.3.3\r\nand I suspect all versions in between.\r\n\r\n**Plugins installed**: []\r\nx-pack (this issue should be reproducible without this plugin installed.)\r\n\r\n**JVM version**:\r\n1.8.0_111\r\n\r\n**OS version**:\r\nUbuntu - Linux peter-Inspiron-7520 4.4.0-53-generic #74-Ubuntu SMP Fri Dec 2 15:59:10 UTC 2016 x86_64 x86_64 x86_64 GNU/Linux\r\nAlso tested on OSX Sierra\r\n\r\n**Description of the problem including expected versus actual behavior**:\r\nWhen allocating shards away from more than one node using `cluster.routing.allocation.exclude._ip`, specifying a space after the comma in the comma delimited list causes only the first ip to be used in the exclusion (though all are listed in cluster settings), all subsequent ips in the list are ignored.\r\n\r\nMy test setup was a couple of machines, with one machine running two instances of ES on different ports and the other with a single instance of elasticsearch, clustered over a network.\r\nThe machine with two instances used the same ip address but different ports and my testing was to exclude the shards from allocating to the machine running the single instance.\r\n\r\nThe method I used for this was to exclude both an unused ip (ot used by any node in the cluster) and the ip of the single instance host, by listing the unused ip first in the list.\r\n\r\nThe effect was that the first ip in the list was the only one used for the exclusion if using a space after the comma in the list. Though cluster settings showed the entire string when retrieving the settings again. So it was Elasticsearch reading that string but only using the first ip in the list.\r\nWithout using any spaces, all the ips in the list were successfully excluded.\r\n\r\nI suggest we either:\r\n - have Elasticsearch strip out the whitespace after commas\r\n - or we specify a strict format that does not allow spaces after the commas and reject any calls to the API that send an \"invalid\" string with spaces after the commas with an error message telling the user why it was rejected\r\n - and with either of the above options, validate that the ips in the list are valid ipv4 or ipv6 ip addresses and throw an error if they are not valid (if we don't do this already)\r\n\r\nThe current user experience with this configuration setting is poor and needs to be fixed so that Elasticsearch does not \"fail\" silently and also behaves predictably.\r\nDocumentation should also be improved to reflect detail around how Elasticsearch works after whatever method is chosen to fix the bug.\r\n\r\n\r\n**Steps to reproduce**:\r\n\r\n```\r\n# exclude list using fake first ip and real second ip\r\n#\r\n\r\n# first with space after comma, nothing happens since only the first (unused) ip is recognised in the cluster allocation exclusion\r\nPUT _cluster/settings\r\n{\r\n \"transient\" : {\r\n \"cluster.routing.allocation.exclude._ip\" : \"192.168.178.35, 192.168.178.21\"\r\n }\r\n}\r\n\r\n# second with the space after the comma removed, which ensures all ips are recognized in the allocation exclusion\r\nPUT _cluster/settings\r\n{\r\n \"transient\" : {\r\n \"cluster.routing.allocation.exclude._ip\" : \"192.168.178.35,192.168.178.21\"\r\n }\r\n}\r\n\r\n# reset to no ips to reallocate shards evenly again\r\nPUT _cluster/settings\r\n{\r\n \"transient\" : {\r\n \"cluster.routing.allocation.exclude._ip\" : \"\"\r\n }\r\n}\r\n```\r\n\r\n\r\n\r\n\r\n**Provide logs (if relevant)**:\r\nLogs only 
show:\r\n```\r\n[2016-12-21T16:51:37,778][INFO ][o.e.c.s.ClusterSettings ] [mac] updating [cluster.routing.allocation.exclude.] from [{}] to [{\"_ip\":\"192.168.178.35, 192.168.178.21\"}]\r\n[2016-12-21T16:51:58,775][INFO ][o.e.c.s.ClusterSettings ] [mac] updating [cluster.routing.allocation.exclude.] from [{\"_ip\":\"192.168.178.35, 192.168.178.21\"}] to [{\"_ip\":\"192.168.178.35,192.168.178.21\"}]\r\n[2016-12-21T16:53:16,101][INFO ][o.e.c.s.ClusterSettings ] [mac] updating [cluster.routing.allocation.exclude.] from [{\"_ip\":\"192.168.178.35,192.168.178.21\"}] to [{\"_ip\":\"\"}]\r\n```", "comments": [ { "body": "Related to https://github.com/elastic/elasticsearch/pull/21790", "created_at": "2016-12-21T16:26:12Z" } ], "number": 22297, "title": "cluster.routing.allocation.exclude._ip using a space after each comma only uses the first ip" }
{ "body": "Previously, certain settings that could take multiple comma delimited\r\nvalues would pick up incorrect values for all entries but the first if\r\neach comma separated value was followed by a whitespace character. For\r\nexample, the multi-value \"A,B,C\" would be correctly parsed as\r\n[\"A\", \"B\", \"C\"] but the multi-value \"A, B, C\" would be incorrectly parsed\r\nas [\"A\", \" B\", \" C\"].\r\n\r\nThis commit allows a comma separated list to have whitespace characters\r\nafter each entry. The specific settings that were affected by this are:\r\n\r\n cluster.routing.allocation.awareness.attributes\r\n index.routing.allocation.require.*\r\n index.routing.allocation.include.*\r\n index.routing.allocation.exclude.*\r\n cluster.routing.allocation.require.*\r\n cluster.routing.allocation.include.*\r\n cluster.routing.allocation.exclude.*\r\n http.cors.allow-methods\r\n http.cors.allow-headers\r\n\r\nFor the allocation filtering related settings, this commit also provides\r\nvalidation of each specified entry if the filtering is done by _ip,\r\n_host_ip, or _publish_ip, to ensure that each entry is a valid IP\r\naddress.\r\n\r\nCloses #22297", "number": 22591, "review_comments": [ { "body": "can you put some javadocs on this", "created_at": "2017-01-12T15:26:58Z" }, { "body": "why don't you use this as a validator on the actual setting?", "created_at": "2017-01-12T15:27:57Z" }, { "body": "see above", "created_at": "2017-01-12T15:28:03Z" }, { "body": "see above", "created_at": "2017-01-12T15:28:08Z" }, { "body": "👍 ", "created_at": "2017-01-12T15:28:14Z" }, { "body": "hmm use the validator on the actual setting instead?", "created_at": "2017-01-12T15:28:41Z" }, { "body": "done", "created_at": "2017-01-12T16:37:20Z" }, { "body": "thanks for alerting me to this, i didn't realize a validator could be set on group settings until you mentioned it. I fixed this and all other occurrences below", "created_at": "2017-01-12T16:38:00Z" } ], "title": "Allow comma delimited array settings to have a space after each entry" }
{ "commits": [ { "message": "Allow comma delimited array settings to have a space after each entry\n\nPreviously, certain settings that could take multiple comma delimited\nvalues would pick up incorrect values for all entries but the first if\neach comma separated value was followed by a whitespace character. For\nexample, the multi-value \"A,B,C\" would be correctly parsed as\n[\"A\", \"B\", \"C\"] but the multi-value \"A, B, C\" would be incorrectly parsed\nas [\"A\", \" B\", \" C\"].\n\nThis commit allows a comma separated list to have whitespace characters\nafter each entry. The specific settings that were affected by this are:\n\n cluster.routing.allocation.awareness.attributes\n index.routing.allocation.require.*\n index.routing.allocation.include.*\n index.routing.allocation.exclude.*\n cluster.routing.allocation.require.*\n cluster.routing.allocation.include.*\n cluster.routing.allocation.exclude.*\n http.cors.allow-methods\n http.cors.allow-headers\n\nFor the allocation filtering related settings, this commit also provides\nvalidation of each specified entry if the filtering is done by _ip,\n_host_ip, or _publish_ip, to ensure that each entry is a valid IP\naddress.\n\nCloses #22297" }, { "message": "address code review" }, { "message": "disallow node attributes with leading or trailing whitespace" } ], "files": [ { "diff": "@@ -70,6 +70,7 @@\n import java.util.Set;\n import java.util.function.Function;\n \n+import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.IP_VALIDATOR;\n import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;\n import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;\n import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;\n@@ -242,11 +243,11 @@ static Setting<Integer> buildNumberOfShardsSetting() {\n public static final String INDEX_ROUTING_INCLUDE_GROUP_PREFIX = \"index.routing.allocation.include\";\n public static final String INDEX_ROUTING_EXCLUDE_GROUP_PREFIX = \"index.routing.allocation.exclude\";\n public static final Setting<Settings> INDEX_ROUTING_REQUIRE_GROUP_SETTING =\n- Setting.groupSetting(INDEX_ROUTING_REQUIRE_GROUP_PREFIX + \".\", Property.Dynamic, Property.IndexScope);\n+ Setting.groupSetting(INDEX_ROUTING_REQUIRE_GROUP_PREFIX + \".\", IP_VALIDATOR, Property.Dynamic, Property.IndexScope);\n public static final Setting<Settings> INDEX_ROUTING_INCLUDE_GROUP_SETTING =\n- Setting.groupSetting(INDEX_ROUTING_INCLUDE_GROUP_PREFIX + \".\", Property.Dynamic, Property.IndexScope);\n+ Setting.groupSetting(INDEX_ROUTING_INCLUDE_GROUP_PREFIX + \".\", IP_VALIDATOR, Property.Dynamic, Property.IndexScope);\n public static final Setting<Settings> INDEX_ROUTING_EXCLUDE_GROUP_SETTING =\n- Setting.groupSetting(INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + \".\", Property.Dynamic, Property.IndexScope);\n+ Setting.groupSetting(INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + \".\", IP_VALIDATOR, Property.Dynamic, Property.IndexScope);\n public static final Setting<Settings> INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING =\n Setting.groupSetting(\"index.routing.allocation.initial_recovery.\"); // this is only setable internally not a registered setting!!\n ", "filename": "core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java", "status": "modified" }, { "diff": "@@ -21,13 +21,15 @@\n \n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.Strings;\n+import org.elasticsearch.common.network.InetAddresses;\n import org.elasticsearch.common.network.NetworkAddress;\n 
import org.elasticsearch.common.regex.Regex;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.transport.TransportAddress;\n \n import java.util.HashMap;\n import java.util.Map;\n+import java.util.function.Consumer;\n \n public class DiscoveryNodeFilters {\n \n@@ -36,14 +38,33 @@ public enum OpType {\n OR\n }\n \n+ /**\n+ * Validates the IP addresses in a group of {@link Settings} by looking for the keys\n+ * \"_ip\", \"_host_ip\", and \"_publish_ip\" and ensuring each of their comma separated values\n+ * is a valid IP address.\n+ */\n+ public static final Consumer<Settings> IP_VALIDATOR = (settings) -> {\n+ Map<String, String> settingsMap = settings.getAsMap();\n+ for (Map.Entry<String, String> entry : settingsMap.entrySet()) {\n+ String propertyKey = entry.getKey();\n+ if (\"_ip\".equals(propertyKey) || \"_host_ip\".equals(propertyKey) || \"_publish_ip\".equals(propertyKey)) {\n+ for (String value : Strings.tokenizeToStringArray(entry.getValue(), \",\")) {\n+ if (InetAddresses.isInetAddress(value) == false) {\n+ throw new IllegalArgumentException(\"invalid IP address [\" + value + \"] for [\" + propertyKey + \"]\");\n+ }\n+ }\n+ }\n+ }\n+ };\n+\n public static DiscoveryNodeFilters buildFromSettings(OpType opType, String prefix, Settings settings) {\n return buildFromKeyValue(opType, settings.getByPrefix(prefix).getAsMap());\n }\n \n public static DiscoveryNodeFilters buildFromKeyValue(OpType opType, Map<String, String> filters) {\n Map<String, String[]> bFilters = new HashMap<>();\n for (Map.Entry<String, String> entry : filters.entrySet()) {\n- String[] values = Strings.splitStringByCommaToArray(entry.getValue());\n+ String[] values = Strings.tokenizeToStringArray(entry.getValue(), \",\");\n if (values.length > 0) {\n bFilters.put(entry.getKey(), values);\n }", "filename": "core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java", "status": "modified" }, { "diff": "@@ -78,12 +78,12 @@ public class AwarenessAllocationDecider extends AllocationDecider {\n public static final String NAME = \"awareness\";\n \n public static final Setting<String[]> CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING =\n- new Setting<>(\"cluster.routing.allocation.awareness.attributes\", \"\", Strings::splitStringByCommaToArray , Property.Dynamic,\n+ new Setting<>(\"cluster.routing.allocation.awareness.attributes\", \"\", s -> Strings.tokenizeToStringArray(s, \",\"), Property.Dynamic,\n Property.NodeScope);\n public static final Setting<Settings> CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING =\n Setting.groupSetting(\"cluster.routing.allocation.awareness.force.\", Property.Dynamic, Property.NodeScope);\n \n- private String[] awarenessAttributes;\n+ private volatile String[] awarenessAttributes;\n \n private volatile Map<String, String[]> forcedAwarenessAttributes;\n ", "filename": "core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java", "status": "modified" }, { "diff": "@@ -30,6 +30,7 @@\n import org.elasticsearch.common.settings.Setting.Property;\n import org.elasticsearch.common.settings.Settings;\n \n+import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.IP_VALIDATOR;\n import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;\n import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;\n \n@@ -68,11 +69,11 @@ public class FilterAllocationDecider extends AllocationDecider {\n private static final String 
CLUSTER_ROUTING_INCLUDE_GROUP_PREFIX = \"cluster.routing.allocation.include\";\n private static final String CLUSTER_ROUTING_EXCLUDE_GROUP_PREFIX = \"cluster.routing.allocation.exclude\";\n public static final Setting<Settings> CLUSTER_ROUTING_REQUIRE_GROUP_SETTING =\n- Setting.groupSetting(CLUSTER_ROUTING_REQUIRE_GROUP_PREFIX + \".\", Property.Dynamic, Property.NodeScope);\n+ Setting.groupSetting(CLUSTER_ROUTING_REQUIRE_GROUP_PREFIX + \".\", IP_VALIDATOR, Property.Dynamic, Property.NodeScope);\n public static final Setting<Settings> CLUSTER_ROUTING_INCLUDE_GROUP_SETTING =\n- Setting.groupSetting(CLUSTER_ROUTING_INCLUDE_GROUP_PREFIX + \".\", Property.Dynamic, Property.NodeScope);\n+ Setting.groupSetting(CLUSTER_ROUTING_INCLUDE_GROUP_PREFIX + \".\", IP_VALIDATOR, Property.Dynamic, Property.NodeScope);\n public static final Setting<Settings> CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING =\n- Setting.groupSetting(CLUSTER_ROUTING_EXCLUDE_GROUP_PREFIX + \".\", Property.Dynamic, Property.NodeScope);\n+ Setting.groupSetting(CLUSTER_ROUTING_EXCLUDE_GROUP_PREFIX + \".\", IP_VALIDATOR, Property.Dynamic, Property.NodeScope);\n \n private volatile DiscoveryNodeFilters clusterRequireFilters;\n private volatile DiscoveryNodeFilters clusterIncludeFilters;", "filename": "core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java", "status": "modified" }, { "diff": "@@ -185,7 +185,16 @@ public class Node implements Closeable {\n */\n public static final Setting<Boolean> NODE_LOCAL_STORAGE_SETTING = Setting.boolSetting(\"node.local_storage\", true, Property.NodeScope);\n public static final Setting<String> NODE_NAME_SETTING = Setting.simpleString(\"node.name\", Property.NodeScope);\n- public static final Setting<Settings> NODE_ATTRIBUTES = Setting.groupSetting(\"node.attr.\", Property.NodeScope);\n+ public static final Setting<Settings> NODE_ATTRIBUTES = Setting.groupSetting(\"node.attr.\", (settings) -> {\n+ Map<String, String> settingsMap = settings.getAsMap();\n+ for (Map.Entry<String, String> entry : settingsMap.entrySet()) {\n+ String value = entry.getValue();\n+ if (Character.isWhitespace(value.charAt(0)) || Character.isWhitespace(value.charAt(value.length() - 1))) {\n+ throw new IllegalArgumentException(\"node.attr.\" + entry.getKey() + \" cannot have leading or trailing whitespace \" +\n+ \"[\" + value + \"]\");\n+ }\n+ }\n+ }, Property.NodeScope);\n public static final Setting<String> BREAKER_TYPE_KEY = new Setting<>(\"indices.breaker.type\", \"hierarchy\", (s) -> {\n switch (s) {\n case \"hierarchy\":", "filename": "core/src/main/java/org/elasticsearch/node/Node.java", "status": "modified" }, { "diff": "@@ -24,8 +24,10 @@\n import org.elasticsearch.cluster.routing.IndexRoutingTable;\n import org.elasticsearch.cluster.routing.IndexShardRoutingTable;\n import org.elasticsearch.cluster.routing.ShardRouting;\n+import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;\n import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;\n import org.elasticsearch.common.logging.Loggers;\n+import org.elasticsearch.common.settings.Setting;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.index.query.QueryBuilders;\n import org.elasticsearch.test.ESIntegTestCase;\n@@ -144,5 +146,15 @@ public void testDisablingAllocationFiltering() throws Exception {\n clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();\n 
assertThat(clusterState.routingTable().index(\"test\").numberOfNodesShardsAreAllocatedOn(), equalTo(2));\n }\n+\n+ public void testInvalidIPFilterClusterSettings() {\n+ String ipKey = randomFrom(\"_ip\", \"_host_ip\", \"_publish_ip\");\n+ Setting<Settings> filterSetting = randomFrom(FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,\n+ FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING);\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> client().admin().cluster().prepareUpdateSettings()\n+ .setTransientSettings(Settings.builder().put(filterSetting.getKey() + ipKey, \"192.168.1.1.\"))\n+ .execute().actionGet());\n+ assertEquals(\"invalid IP address [192.168.1.1.] for [\" + ipKey + \"]\", e.getMessage());\n+ }\n }\n ", "filename": "core/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java", "status": "modified" }, { "diff": "@@ -245,6 +245,17 @@ public void testIpPublishFilteringNotMatchingOr() {\n assertThat(filters.match(node), equalTo(true));\n }\n \n+ public void testCommaSeparatedValuesTrimmed() {\n+ DiscoveryNode node = new DiscoveryNode(\"\", \"\", \"\", \"\", \"192.1.1.54\", localAddress, singletonMap(\"tag\", \"B\"), emptySet(), null);\n+\n+ Settings settings = shuffleSettings(Settings.builder()\n+ .put(\"xxx.\" + randomFrom(\"_ip\", \"_host_ip\", \"_publish_ip\"), \"192.1.1.1, 192.1.1.54\")\n+ .put(\"xxx.tag\", \"A, B\")\n+ .build());\n+ DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, \"xxx.\", settings);\n+ assertTrue(filters.match(node));\n+ }\n+\n private Settings shuffleSettings(Settings source) {\n Settings.Builder settings = Settings.builder();\n List<String> keys = new ArrayList<>(source.getAsMap().keySet());", "filename": "core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java", "status": "modified" }, { "diff": "@@ -32,10 +32,14 @@\n import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;\n import org.elasticsearch.cluster.routing.allocation.command.CancelAllocationCommand;\n import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;\n+import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;\n import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;\n import org.elasticsearch.common.logging.Loggers;\n import org.elasticsearch.common.settings.Settings;\n \n+import java.util.HashMap;\n+import java.util.Map;\n+\n import static java.util.Collections.singletonMap;\n import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;\n import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;\n@@ -803,4 +807,50 @@ public void testUnassignedShardsWithUnbalancedZones() {\n assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(4)); // +1 for relocating shard.\n assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1)); // Still 1 unassigned.\n }\n+\n+ public void testMultipleAwarenessAttributes() {\n+ AllocationService strategy = createAllocationService(Settings.builder()\n+ .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), \"zone, rack\")\n+ .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + \"zone.values\", \"a, b\")\n+ 
.put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + \"rack.values\", \"c, d\")\n+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), \"always\")\n+ .build());\n+\n+ logger.info(\"Building initial routing table for 'testUnbalancedZones'\");\n+\n+ MetaData metaData = MetaData.builder()\n+ .put(IndexMetaData.builder(\"test\").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))\n+ .build();\n+\n+ RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index(\"test\")).build();\n+\n+ ClusterState clusterState = ClusterState.builder(\n+ org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)\n+ ).metaData(metaData).routingTable(initialRoutingTable).build();\n+\n+ logger.info(\"--> adding two nodes in different zones and do rerouting\");\n+ Map<String, String> nodeAAttributes = new HashMap<>();\n+ nodeAAttributes.put(\"zone\", \"a\");\n+ nodeAAttributes.put(\"rack\", \"c\");\n+ Map<String, String> nodeBAttributes = new HashMap<>();\n+ nodeBAttributes.put(\"zone\", \"b\");\n+ nodeBAttributes.put(\"rack\", \"d\");\n+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()\n+ .add(newNode(\"A-0\", nodeAAttributes))\n+ .add(newNode(\"B-0\", nodeBAttributes))\n+ ).build();\n+ clusterState = strategy.reroute(clusterState, \"reroute\");\n+ assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(0));\n+ assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));\n+\n+ logger.info(\"--> start the shards (primaries)\");\n+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));\n+ assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1));\n+ assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));\n+\n+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));\n+ logger.info(\"--> all replicas are allocated and started since we have one node in each zone and rack\");\n+ assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2));\n+ assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java", "status": "modified" }, { "diff": "@@ -37,6 +37,8 @@\n import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider;\n import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;\n import org.elasticsearch.common.settings.ClusterSettings;\n+import org.elasticsearch.common.settings.IndexScopedSettings;\n+import org.elasticsearch.common.settings.Setting;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.snapshots.Snapshot;\n import org.elasticsearch.snapshots.SnapshotId;\n@@ -191,4 +193,16 @@ private ClusterState createInitialClusterState(AllocationService service, Settin\n .build();\n return service.reroute(clusterState, \"reroute\", false);\n }\n+\n+ public void testInvalidIPFilter() {\n+ String ipKey = randomFrom(\"_ip\", \"_host_ip\", \"_publish_ip\");\n+ Setting<Settings> filterSetting = randomFrom(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING,\n+ IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING, 
IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING);\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {\n+ IndexScopedSettings indexScopedSettings = new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);\n+ indexScopedSettings.updateDynamicSettings(Settings.builder().put(filterSetting.getKey() + ipKey, \"192..168.1.1\").build(),\n+ Settings.builder().put(Settings.EMPTY), Settings.builder(), \"test ip validation\");\n+ });\n+ assertEquals(\"invalid IP address [192..168.1.1] for [\" + ipKey + \"]\", e.getMessage());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterAllocationDeciderTests.java", "status": "modified" }, { "diff": "@@ -420,14 +420,14 @@ static Netty4CorsConfig buildCorsConfig(Settings settings) {\n if (SETTING_CORS_ALLOW_CREDENTIALS.get(settings)) {\n builder.allowCredentials();\n }\n- String[] strMethods = Strings.splitStringByCommaToArray(SETTING_CORS_ALLOW_METHODS.get(settings));\n+ String[] strMethods = Strings.tokenizeToStringArray(SETTING_CORS_ALLOW_METHODS.get(settings), \",\");\n HttpMethod[] methods = Arrays.asList(strMethods)\n .stream()\n .map(HttpMethod::valueOf)\n .toArray(size -> new HttpMethod[size]);\n return builder.allowedRequestMethods(methods)\n .maxAge(SETTING_CORS_MAX_AGE.get(settings))\n- .allowedRequestHeaders(Strings.splitStringByCommaToArray(SETTING_CORS_ALLOW_HEADERS.get(settings)))\n+ .allowedRequestHeaders(Strings.tokenizeToStringArray(SETTING_CORS_ALLOW_HEADERS.get(settings), \",\"))\n .shortCircuit()\n .build();\n }", "filename": "modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java", "status": "modified" }, { "diff": "@@ -51,6 +51,7 @@\n import java.util.Set;\n import java.util.stream.Collectors;\n \n+import static org.elasticsearch.common.Strings.collectionToDelimitedString;\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS;\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS;\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS;\n@@ -89,11 +90,12 @@ public void shutdown() throws Exception {\n public void testCorsConfig() {\n final Set<String> methods = new HashSet<>(Arrays.asList(\"get\", \"options\", \"post\"));\n final Set<String> headers = new HashSet<>(Arrays.asList(\"Content-Type\", \"Content-Length\"));\n+ final String suffix = randomBoolean() ? 
\" \" : \"\"; // sometimes have a leading whitespace between comma delimited elements\n final Settings settings = Settings.builder()\n .put(SETTING_CORS_ENABLED.getKey(), true)\n .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), \"*\")\n- .put(SETTING_CORS_ALLOW_METHODS.getKey(), Strings.collectionToCommaDelimitedString(methods))\n- .put(SETTING_CORS_ALLOW_HEADERS.getKey(), Strings.collectionToCommaDelimitedString(headers))\n+ .put(SETTING_CORS_ALLOW_METHODS.getKey(), collectionToDelimitedString(methods, \",\", suffix, \"\"))\n+ .put(SETTING_CORS_ALLOW_HEADERS.getKey(), collectionToDelimitedString(headers, \",\", suffix, \"\"))\n .put(SETTING_CORS_ALLOW_CREDENTIALS.getKey(), true)\n .build();\n final Netty4CorsConfig corsConfig = Netty4HttpServerTransport.buildCorsConfig(settings);", "filename": "modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java", "status": "modified" }, { "diff": "@@ -138,4 +138,41 @@ public void testWarnIfPreRelease() {\n \n }\n \n+ public void testNodeAttributes() throws IOException {\n+ String attr = randomAsciiOfLength(5);\n+ Settings.Builder settings = baseSettings().put(Node.NODE_ATTRIBUTES.getKey() + \"test_attr\", attr);\n+ try (Node node = new MockNode(settings.build(), Collections.singleton(MockTcpTransportPlugin.class))) {\n+ final Settings nodeSettings = randomBoolean() ? node.settings() : node.getEnvironment().settings();\n+ assertEquals(attr, Node.NODE_ATTRIBUTES.get(nodeSettings).getAsMap().get(\"test_attr\"));\n+ }\n+\n+ // leading whitespace not allowed\n+ attr = \" leading\";\n+ settings = baseSettings().put(Node.NODE_ATTRIBUTES.getKey() + \"test_attr\", attr);\n+ try (Node node = new MockNode(settings.build(), Collections.singleton(MockTcpTransportPlugin.class))) {\n+ fail(\"should not allow a node attribute with leading whitespace\");\n+ } catch (IllegalArgumentException e) {\n+ assertEquals(\"node.attr.test_attr cannot have leading or trailing whitespace [ leading]\", e.getMessage());\n+ }\n+\n+ // trailing whitespace not allowed\n+ attr = \"trailing \";\n+ settings = baseSettings().put(Node.NODE_ATTRIBUTES.getKey() + \"test_attr\", attr);\n+ try (Node node = new MockNode(settings.build(), Collections.singleton(MockTcpTransportPlugin.class))) {\n+ fail(\"should not allow a node attribute with trailing whitespace\");\n+ } catch (IllegalArgumentException e) {\n+ assertEquals(\"node.attr.test_attr cannot have leading or trailing whitespace [trailing ]\", e.getMessage());\n+ }\n+ }\n+\n+ private static Settings.Builder baseSettings() {\n+ final Path tempDir = createTempDir();\n+ return Settings.builder()\n+ .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), InternalTestCluster.clusterName(\"single-node-cluster\", randomLong()))\n+ .put(Environment.PATH_HOME_SETTING.getKey(), tempDir)\n+ .put(NetworkModule.HTTP_ENABLED.getKey(), false)\n+ .put(\"transport.type\", \"mock-socket-network\")\n+ .put(Node.NODE_DATA_SETTING.getKey(), true);\n+ }\n+\n }", "filename": "test/framework/src/main/java/org/elasticsearch/node/NodeTests.java", "status": "modified" } ] }
{ "body": "The low level TCP handshake can cause channel / connection leaks if it's interrupted\r\nsince the caller doesn't close the channel / connection if the handshake was not successful.\r\nThis commit fixes the channel leak and adds general test infrastructure to detect channel leaks\r\nin the future.", "comments": [ { "body": "@elasticmachine test this please", "created_at": "2017-01-11T14:59:57Z" } ], "number": 22554, "title": "Prevent open channel leaks if handshake times out or is interrupted" }
{ "body": "Today there are several races / holes in TcpTransport and MockTcpTransport\r\nthat can allow connections to be opened and remain unclosed while the actual\r\ntransport implementation is closed. A recently added assertions in #22554 exposes\r\nthese problems. This commit fixes several issues related to missed locks or channel\r\ncreations outside of a lock not checking if the resource is still open.", "number": 22589, "review_comments": [ { "body": "Assert that the lock is held?", "created_at": "2017-01-12T15:52:20Z" }, { "body": "I think that `removedChannel != null && removalChannel` is more defensive (here we are relying on either always putting `Boolean.TRUE` in the map (which we do), or the fact that a boxed boolean always boxes to `Boolean.TRUE` (the JVM guarantees this) but I prefer to not rely on these fragile/subtle things).", "created_at": "2017-01-12T15:56:15Z" }, { "body": "I don't think it needs to hold a lock here. it's really best effort and can be used anywhere. Under lock it has different semantics but they are related to the context of the lock so I think we are good", "created_at": "2017-01-12T18:29:10Z" }, { "body": "I just moved to use a set instead.", "created_at": "2017-01-12T18:29:25Z" }, { "body": "That's even better.", "created_at": "2017-01-12T18:29:33Z" }, { "body": "I guess I'm missing something, but you're checking that we are indeed open; if we don't hold the lock here aren't we subject to time-of-check-time-of-use problems?", "created_at": "2017-01-12T18:30:59Z" }, { "body": "again, the ensureOpen is only a check to fail if we are closed. Yet, if you run it under the lock you make sure that nobody else is modifying a structure protected by this lock. The state of being open / closed is not protected by this lock. It's a volatile read that's it. I don't understand what your issue is here to be honest. The lock has nothing todo with the ensureOpen call. We do this in many other places and almost never protect that once the check has passed a close can't be concurrently happen. For the check itself it doesn't matter, it might matter for the caller but the callers context can have this extra protection. In this case here all places where it's used are accidentally protected by a lock but that is not required for this check.", "created_at": "2017-01-12T19:26:40Z" }, { "body": "I'm sorry that I misread it.", "created_at": "2017-01-12T19:47:42Z" }, { "body": "I guess this can happen to others... I am going to add a comment", "created_at": "2017-01-12T20:13:09Z" } ], "title": "Ensure new connections won't be opened if transport is closed or closing" }
{ "commits": [ { "message": "Ensure new connections won't be opened if transport is closed or closing\n\nToday there are several races / holes in TcpTransport and MockTcpTransport\nthat can allow connections to be opened and remain unclosed while the actual\ntransport implementation is closed. A recently added assertions in #22554 exposes\nthese problems. This commit fixes several issues related to missed locks or channel\ncreations outside of a lock not checking if the resource is still open." }, { "message": "use a set instead of a map for open reference tracking" }, { "message": "remove unused imports" } ], "files": [ { "diff": "@@ -432,15 +432,12 @@ public boolean nodeConnected(DiscoveryNode node) {\n @Override\n public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile) {\n connectionProfile = connectionProfile == null ? defaultConnectionProfile : connectionProfile;\n- if (!lifecycle.started()) {\n- throw new IllegalStateException(\"can't add nodes to a stopped transport\");\n- }\n if (node == null) {\n throw new ConnectTransportException(null, \"can't connect to a null node\");\n }\n- globalLock.readLock().lock();\n+ globalLock.readLock().lock(); // ensure we don't open connections while we are closing\n try {\n-\n+ ensureOpen();\n try (Releasable ignored = connectionLock.acquire(node.getId())) {\n if (!lifecycle.started()) {\n throw new IllegalStateException(\"can't add nodes to a stopped transport\");\n@@ -477,31 +474,40 @@ public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfil\n \n @Override\n public final NodeChannels openConnection(DiscoveryNode node, ConnectionProfile connectionProfile) throws IOException {\n+ if (node == null) {\n+ throw new ConnectTransportException(null, \"can't open connection to a null node\");\n+ }\n boolean success = false;\n NodeChannels nodeChannels = null;\n+ globalLock.readLock().lock(); // ensure we don't open connections while we are closing\n try {\n- nodeChannels = connectToChannels(node, connectionProfile);\n- final Channel channel = nodeChannels.getChannels().get(0); // one channel is guaranteed by the connection profile\n- final TimeValue connectTimeout = connectionProfile.getConnectTimeout() == null ?\n- defaultConnectionProfile.getConnectTimeout() :\n- connectionProfile.getConnectTimeout();\n- final TimeValue handshakeTimeout = connectionProfile.getHandshakeTimeout() == null ?\n- connectTimeout : connectionProfile.getHandshakeTimeout();\n- final Version version = executeHandshake(node, channel, handshakeTimeout);\n- transportServiceAdapter.onConnectionOpened(node);\n- nodeChannels = new NodeChannels(nodeChannels, version);// clone the channels - we now have the correct version\n- success = true;\n- return nodeChannels;\n- } catch (ConnectTransportException e) {\n- throw e;\n- } catch (Exception e) {\n- // ConnectTransportExceptions are handled specifically on the caller end - we wrap the actual exception to ensure\n- // only relevant exceptions are logged on the caller end.. 
this is the same as in connectToNode\n- throw new ConnectTransportException(node, \"general node connection failure\", e);\n- } finally {\n- if (success == false) {\n- IOUtils.closeWhileHandlingException(nodeChannels);\n+ ensureOpen();\n+ try {\n+ nodeChannels = connectToChannels(node, connectionProfile);\n+ final Channel channel = nodeChannels.getChannels().get(0); // one channel is guaranteed by the connection profile\n+ final TimeValue connectTimeout = connectionProfile.getConnectTimeout() == null ?\n+ defaultConnectionProfile.getConnectTimeout() :\n+ connectionProfile.getConnectTimeout();\n+ final TimeValue handshakeTimeout = connectionProfile.getHandshakeTimeout() == null ?\n+ connectTimeout : connectionProfile.getHandshakeTimeout();\n+ final Version version = executeHandshake(node, channel, handshakeTimeout);\n+ transportServiceAdapter.onConnectionOpened(node);\n+ nodeChannels = new NodeChannels(nodeChannels, version);// clone the channels - we now have the correct version\n+ success = true;\n+ return nodeChannels;\n+ } catch (ConnectTransportException e) {\n+ throw e;\n+ } catch (Exception e) {\n+ // ConnectTransportExceptions are handled specifically on the caller end - we wrap the actual exception to ensure\n+ // only relevant exceptions are logged on the caller end.. this is the same as in connectToNode\n+ throw new ConnectTransportException(node, \"general node connection failure\", e);\n+ } finally {\n+ if (success == false) {\n+ IOUtils.closeWhileHandlingException(nodeChannels);\n+ }\n }\n+ } finally {\n+ globalLock.readLock().unlock();\n }\n }\n \n@@ -1577,4 +1583,14 @@ protected final void onChannelClosed(Channel channel) {\n }\n }\n }\n+\n+ /**\n+ * Ensures this transport is still started / open\n+ * @throws IllegalStateException if the transport is not started / open\n+ */\n+ protected final void ensureOpen() {\n+ if (lifecycle.started() == false) {\n+ throw new IllegalStateException(\"transport has been stopped\");\n+ }\n+ }\n }", "filename": "core/src/main/java/org/elasticsearch/transport/TcpTransport.java", "status": "modified" }, { "diff": "@@ -49,10 +49,10 @@\n import java.net.Socket;\n import java.net.SocketException;\n import java.net.SocketTimeoutException;\n-import java.util.HashMap;\n-import java.util.IdentityHashMap;\n+import java.util.HashSet;\n import java.util.List;\n import java.util.Map;\n+import java.util.Set;\n import java.util.concurrent.ConcurrentHashMap;\n import java.util.concurrent.CountDownLatch;\n import java.util.concurrent.Executor;\n@@ -76,7 +76,7 @@ public class MockTcpTransport extends TcpTransport<MockTcpTransport.MockChannel>\n */\n public static final ConnectionProfile LIGHT_PROFILE;\n \n- private final Map<MockChannel, Boolean> openChannels = new IdentityHashMap<>();\n+ private final Set<MockChannel> openChannels = new HashSet<>();\n \n static {\n ConnectionProfile.Builder builder = new ConnectionProfile.Builder();\n@@ -289,7 +289,7 @@ public MockChannel(Socket socket, InetSocketAddress localAddress, String profile\n this.profile = profile;\n this.onClose = () -> onClose.accept(this);\n synchronized (openChannels) {\n- openChannels.put(this, Boolean.TRUE);\n+ openChannels.add(this);\n }\n }\n \n@@ -305,6 +305,9 @@ public MockChannel(ServerSocket serverSocket, String profile) {\n this.profile = profile;\n this.activeChannel = null;\n this.onClose = null;\n+ synchronized (openChannels) {\n+ openChannels.add(this);\n+ }\n }\n \n public void accept(Executor executor) throws IOException {\n@@ -313,10 +316,10 @@ public void accept(Executor 
executor) throws IOException {\n MockChannel incomingChannel = null;\n try {\n configureSocket(incomingSocket);\n- incomingChannel = new MockChannel(incomingSocket, localAddress, profile, workerChannels::remove);\n- //establish a happens-before edge between closing and accepting a new connection\n synchronized (this) {\n if (isOpen.get()) {\n+ incomingChannel = new MockChannel(incomingSocket, localAddress, profile, workerChannels::remove);\n+ //establish a happens-before edge between closing and accepting a new connection\n workerChannels.put(incomingChannel, Boolean.TRUE);\n // this spawns a new thread immediately, so OK under lock\n incomingChannel.loopRead(executor);\n@@ -360,7 +363,7 @@ protected void doRun() throws Exception {\n @Override\n public void close() throws IOException {\n if (isOpen.compareAndSet(true, false)) {\n- final Boolean removedChannel;\n+ final boolean removedChannel;\n synchronized (openChannels) {\n removedChannel = openChannels.remove(this);\n }\n@@ -370,9 +373,19 @@ public void close() throws IOException {\n IOUtils.close(serverSocket, activeChannel, () -> IOUtils.close(workerChannels.keySet()),\n () -> cancellableThreads.cancel(\"channel closed\"), onClose);\n }\n- assert removedChannel : \"Channel was not removed or removed twice?\";\n+ assert removedChannel: \"Channel was not removed or removed twice?\";\n }\n }\n+\n+ @Override\n+ public String toString() {\n+ return \"MockChannel{\" +\n+ \"profile='\" + profile + '\\'' +\n+ \", isOpen=\" + isOpen +\n+ \", localAddress=\" + localAddress +\n+ \", isServerSocket=\" + (serverSocket != null) +\n+ '}';\n+ }\n }\n \n ", "filename": "test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java", "status": "modified" } ] }
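The MockTcpTransport change above keeps every open channel in a set so the test infrastructure can assert that nothing leaked. Below is a stripped-down, hypothetical sketch of that bookkeeping (run with -ea so the assertions fire); it only mirrors the add/remove assertions visible in the diff and is not the MockTcpTransport code itself.

```java
import java.io.Closeable;
import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.Set;

public class OpenChannelTrackerSketch {

    // Channels register on open and deregister exactly once on close; at shutdown the
    // set must be empty, otherwise the test infrastructure flags a leaked channel.
    private final Set<Closeable> openChannels = Collections.newSetFromMap(new IdentityHashMap<>());

    public synchronized void onOpen(Closeable channel) {
        boolean added = openChannels.add(channel);
        assert added : "channel registered twice?";
    }

    public synchronized void onClose(Closeable channel) {
        boolean removed = openChannels.remove(channel);
        assert removed : "Channel was not removed or removed twice?";
    }

    public synchronized void assertAllChannelsClosed() {
        assert openChannels.isEmpty() : "leaked channels: " + openChannels;
    }

    public static void main(String[] args) {
        OpenChannelTrackerSketch tracker = new OpenChannelTrackerSketch();
        Closeable channel = () -> { };
        tracker.onOpen(channel);
        tracker.onClose(channel);
        tracker.assertAllChannelsClosed(); // passes: every open had a matching close
    }
}
```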
{ "body": "**Repo Step**\r\n1. Download Elasticsearch 5.1.1\r\n2. Install the latest snapshot plugin with command .\\elasticsearch-plugin.bat install repository-azure\r\n3. Add the following Azure Storage configurations in elasticserch.yml\r\n cloud.azure.storage.my_account1.account: xxx\r\n cloud.azure.storage.my_account1.key: xxx\r\n cloud.azure.storage.my_account1.default: true\r\n4. Start Elasticsearch with elasticsearch.bat\r\n5. Index a single document like the following \r\n PUT twitter/tweet/1\r\n {\r\n \"user\" : \"kimchy\",\r\n \"post_date\" : \"2009-11-15T14:12:12\",\r\n \"message\" : \"trying out Elasticsearch\"\r\n }\r\n6. Create a repository \r\nPUT /_snapshot/test-20170111\r\n{\r\n \"type\": \"azure\",\r\n \"settings\": {\r\n \"account\": \"my_account1\",\r\n \"container\": \"test-20170111\"\r\n }\r\n}\r\n7. Take a snapshot \r\nPUT /_snapshot/test-20170111/backup01?wait_for_completion=true \r\n8. After snapshot is done, then delete all the indices on the cluster\r\n9. Restore the snapshot \r\nPOST /_snapshot/test-20170111/backup01/_restore?wait_for_completion=true\r\n\r\n**Expected** \r\nRestore succeeds and the cluster has one index named twitter, which contains one document.\r\n\r\n**Actual**\r\nRestored failed with the following error messages in log. \r\n[2017-01-09T15:56:51,920][WARN ][o.e.i.c.IndicesClusterStateService] [MRed28V] [[twitter][2]] marking and sending shard failed due to [failed recovery]\r\norg.elasticsearch.indices.recovery.RecoveryFailedException: [twitter][2]: Recovery failed on {MRed28V}{MRed28V9SVCOhAKpfxh01Q}{qty_A70hSGisRYnY2jT4VA}{127.0.0.1}{127.0.0.1:9300}\r\n\tat org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$2(IndexShard.java:1512) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:458) [elasticsearch-5.1.1.jar:5.1.1]\r\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_111]\r\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_111]\r\n\tat java.lang.Thread.run(Thread.java:745) [?:1.8.0_111]\r\nCaused by: org.elasticsearch.index.shard.IndexShardRecoveryException: failed recovery\r\n\tat org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:300) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.recoverFromRepository(StoreRecovery.java:233) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.restoreFromRepository(IndexShard.java:1244) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$2(IndexShard.java:1508) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\t... 
4 more\r\nCaused by: org.elasticsearch.index.snapshots.IndexShardRestoreFailedException: restore failed\r\n\tat org.elasticsearch.index.shard.StoreRecovery.restore(StoreRecovery.java:406) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.lambda$recoverFromRepository$4(StoreRecovery.java:235) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:258) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.recoverFromRepository(StoreRecovery.java:233) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.restoreFromRepository(IndexShard.java:1244) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$2(IndexShard.java:1508) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\t... 4 more\r\nCaused by: org.elasticsearch.index.snapshots.IndexShardRestoreFailedException: failed to restore snapshot [backup05/Zl8a5zPGSR6NsKbHIegyDw]\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository.restoreShard(BlobStoreRepository.java:914) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.restore(StoreRecovery.java:401) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.lambda$recoverFromRepository$4(StoreRecovery.java:235) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:258) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.recoverFromRepository(StoreRecovery.java:233) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.restoreFromRepository(IndexShard.java:1244) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$2(IndexShard.java:1508) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\t... 4 more\r\nCaused by: org.elasticsearch.index.snapshots.IndexShardRestoreFailedException: Failed to recover index\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository$RestoreContext.restore(BlobStoreRepository.java:1600) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository.restoreShard(BlobStoreRepository.java:912) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.restore(StoreRecovery.java:401) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.lambda$recoverFromRepository$4(StoreRecovery.java:235) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:258) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.recoverFromRepository(StoreRecovery.java:233) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.restoreFromRepository(IndexShard.java:1244) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$2(IndexShard.java:1508) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\t... 4 more\r\nCaused by: org.apache.lucene.index.CorruptIndexException: verification failed (hardware problem?) 
: expected=1ck4qa4 actual=null footer=null writtenLength=0 expectedLength=130 (resource=name [segments_1], length [130], checksum [1ck4qa4], writtenBy [5.0.0]) (resource=VerifyingIndexOutput(segments_1))\r\n\tat org.elasticsearch.index.store.Store$LuceneVerifyingIndexOutput.verify(Store.java:1120) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.store.Store.verify(Store.java:450) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository$RestoreContext.restoreFile(BlobStoreRepository.java:1662) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository$RestoreContext.restore(BlobStoreRepository.java:1597) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository.restoreShard(BlobStoreRepository.java:912) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.restore(StoreRecovery.java:401) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.lambda$recoverFromRepository$4(StoreRecovery.java:235) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:258) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.recoverFromRepository(StoreRecovery.java:233) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.restoreFromRepository(IndexShard.java:1244) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$2(IndexShard.java:1508) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\t... 4 more\r\n[2017-01-09T15:56:51,926][WARN ][o.e.c.a.s.ShardStateAction] [MRed28V] [twitter][2] received shard failed for shard id [[twitter][2]], allocation id [Ac1iHjlcSjuT61MdlaIz5w], primary term [0], message [failed recovery], failure [RecoveryFailedException[[twitter][2]: Recovery failed on {MRed28V}{MRed28V9SVCOhAKpfxh01Q}{qty_A70hSGisRYnY2jT4VA}{127.0.0.1}{127.0.0.1:9300}]; nested: IndexShardRecoveryException[failed recovery]; nested: IndexShardRestoreFailedException[restore failed]; nested: IndexShardRestoreFailedException[failed to restore snapshot [backup05/Zl8a5zPGSR6NsKbHIegyDw]]; nested: IndexShardRestoreFailedException[Failed to recover index]; nested: CorruptIndexException[verification failed (hardware problem?) 
: expected=1ck4qa4 actual=null footer=null writtenLength=0 expectedLength=130 (resource=name [segments_1], length [130], checksum [1ck4qa4], writtenBy [5.0.0]) (resource=VerifyingIndexOutput(segments_1))]; ]\r\norg.elasticsearch.indices.recovery.RecoveryFailedException: [twitter][2]: Recovery failed on {MRed28V}{MRed28V9SVCOhAKpfxh01Q}{qty_A70hSGisRYnY2jT4VA}{127.0.0.1}{127.0.0.1:9300}\r\n\tat org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$2(IndexShard.java:1512) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:458) [elasticsearch-5.1.1.jar:5.1.1]\r\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_111]\r\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_111]\r\n\tat java.lang.Thread.run(Thread.java:745) [?:1.8.0_111]\r\nCaused by: org.elasticsearch.index.shard.IndexShardRecoveryException: failed recovery\r\n\tat org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:300) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.recoverFromRepository(StoreRecovery.java:233) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.restoreFromRepository(IndexShard.java:1244) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$2(IndexShard.java:1508) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\t... 4 more\r\nCaused by: org.elasticsearch.index.snapshots.IndexShardRestoreFailedException: restore failed\r\n\tat org.elasticsearch.index.shard.StoreRecovery.restore(StoreRecovery.java:406) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.lambda$recoverFromRepository$4(StoreRecovery.java:235) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:258) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.recoverFromRepository(StoreRecovery.java:233) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.restoreFromRepository(IndexShard.java:1244) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$2(IndexShard.java:1508) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\t... 4 more\r\nCaused by: org.elasticsearch.index.snapshots.IndexShardRestoreFailedException: failed to restore snapshot [backup05/Zl8a5zPGSR6NsKbHIegyDw]\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository.restoreShard(BlobStoreRepository.java:914) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.restore(StoreRecovery.java:401) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.lambda$recoverFromRepository$4(StoreRecovery.java:235) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:258) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.recoverFromRepository(StoreRecovery.java:233) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.restoreFromRepository(IndexShard.java:1244) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$2(IndexShard.java:1508) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\t... 
4 more\r\nCaused by: org.elasticsearch.index.snapshots.IndexShardRestoreFailedException: Failed to recover index\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository$RestoreContext.restore(BlobStoreRepository.java:1600) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository.restoreShard(BlobStoreRepository.java:912) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.restore(StoreRecovery.java:401) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.lambda$recoverFromRepository$4(StoreRecovery.java:235) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:258) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.recoverFromRepository(StoreRecovery.java:233) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.restoreFromRepository(IndexShard.java:1244) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$2(IndexShard.java:1508) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\t... 4 more\r\nCaused by: org.apache.lucene.index.CorruptIndexException: verification failed (hardware problem?) : expected=1ck4qa4 actual=null footer=null writtenLength=0 expectedLength=130 (resource=name [segments_1], length [130], checksum [1ck4qa4], writtenBy [5.0.0]) (resource=VerifyingIndexOutput(segments_1))\r\n\tat org.elasticsearch.index.store.Store$LuceneVerifyingIndexOutput.verify(Store.java:1120) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.store.Store.verify(Store.java:450) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository$RestoreContext.restoreFile(BlobStoreRepository.java:1662) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository$RestoreContext.restore(BlobStoreRepository.java:1597) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository.restoreShard(BlobStoreRepository.java:912) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.restore(StoreRecovery.java:401) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.lambda$recoverFromRepository$4(StoreRecovery.java:235) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:258) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.recoverFromRepository(StoreRecovery.java:233) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.restoreFromRepository(IndexShard.java:1244) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$2(IndexShard.java:1508) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\t... 
4 more\r\n[2017-01-09T15:56:51,943][INFO ][o.e.c.r.a.AllocationService] [MRed28V] Cluster health status changed from [YELLOW] to [RED] (reason: [shards failed [[twitter][2]] ...]).\r\n[2017-01-09T15:56:52,031][WARN ][o.e.i.c.IndicesClusterStateService] [MRed28V] [[twitter][3]] marking and sending shard failed due to [failed recovery]\r\norg.elasticsearch.indices.recovery.RecoveryFailedException: [twitter][3]: Recovery failed on {MRed28V}{MRed28V9SVCOhAKpfxh01Q}{qty_A70hSGisRYnY2jT4VA}{127.0.0.1}{127.0.0.1:9300}\r\n\tat org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$2(IndexShard.java:1512) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:458) [elasticsearch-5.1.1.jar:5.1.1]\r\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_111]\r\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_111]\r\n\tat java.lang.Thread.run(Thread.java:745) [?:1.8.0_111]\r\nCaused by: org.elasticsearch.index.shard.IndexShardRecoveryException: failed recovery\r\n\tat org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:300) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.recoverFromRepository(StoreRecovery.java:233) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.restoreFromRepository(IndexShard.java:1244) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$2(IndexShard.java:1508) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\t... 4 more\r\nCaused by: org.elasticsearch.index.snapshots.IndexShardRestoreFailedException: restore failed\r\n\tat org.elasticsearch.index.shard.StoreRecovery.restore(StoreRecovery.java:406) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.lambda$recoverFromRepository$4(StoreRecovery.java:235) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:258) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.recoverFromRepository(StoreRecovery.java:233) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.restoreFromRepository(IndexShard.java:1244) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$2(IndexShard.java:1508) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\t... 
4 more\r\nCaused by: org.elasticsearch.index.snapshots.IndexShardRestoreFailedException: failed to restore snapshot [backup05/Zl8a5zPGSR6NsKbHIegyDw]\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository.restoreShard(BlobStoreRepository.java:914) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.restore(StoreRecovery.java:401) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.lambda$recoverFromRepository$4(StoreRecovery.java:235) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:258) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.recoverFromRepository(StoreRecovery.java:233) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.restoreFromRepository(IndexShard.java:1244) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$2(IndexShard.java:1508) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\t... 4 more\r\nCaused by: org.elasticsearch.index.snapshots.IndexShardRestoreFailedException: Failed to recover index\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository$RestoreContext.restore(BlobStoreRepository.java:1600) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository.restoreShard(BlobStoreRepository.java:912) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.restore(StoreRecovery.java:401) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.lambda$recoverFromRepository$4(StoreRecovery.java:235) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:258) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.recoverFromRepository(StoreRecovery.java:233) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.restoreFromRepository(IndexShard.java:1244) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$2(IndexShard.java:1508) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\t... 4 more\r\nCaused by: org.apache.lucene.index.CorruptIndexException: verification failed (hardware problem?) 
: expected=jll51r actual=null footer=null writtenLength=0 expectedLength=405 (resource=name [_0.cfe], length [405], checksum [jll51r], writtenBy [6.3.0]) (resource=VerifyingIndexOutput(_0.cfe))\r\n\tat org.elasticsearch.index.store.Store$LuceneVerifyingIndexOutput.verify(Store.java:1120) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.store.Store.verify(Store.java:450) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository$RestoreContext.restoreFile(BlobStoreRepository.java:1662) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository$RestoreContext.restore(BlobStoreRepository.java:1597) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository.restoreShard(BlobStoreRepository.java:912) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.restore(StoreRecovery.java:401) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.lambda$recoverFromRepository$4(StoreRecovery.java:235) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:258) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.StoreRecovery.recoverFromRepository(StoreRecovery.java:233) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.restoreFromRepository(IndexShard.java:1244) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\tat org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$2(IndexShard.java:1508) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n\t... 4 more\r\n", "comments": [ { "body": "Linking PR: https://github.com/elastic/elasticsearch/pull/22577\r\n\r\nWorkaround is to explicitly set the chunk_size setting to 64m to match the default chunck_size value, eg.\r\n\r\n```\r\nPUT _snapshot/my_azure_repo\r\n{\r\n \"type\": \"azure\",\r\n \"settings\": {\r\n \"container\": \"container_name\",\r\n \"base_path\": \"some_path\",\r\n \"chunk_size\": \"64m\"\r\n }\r\n}\r\n```", "created_at": "2017-01-12T06:15:33Z" } ], "number": 22513, "title": "Snapshot restore failed after a successful snapshot with ES 5.1.1" }
{ "body": "Before, the default chunk size for Azure repositories was\r\n-1 bytes, which meant that if the chunk_size was not set on\r\nthe Azure repository, nor as a node setting, then no data\r\nfiles would get written as part of the snapshot (because\r\nthe BlobStoreRepository's PartSliceStream does not know\r\nhow to process negative chunk sizes).\r\n\r\nThis commit fixes the default chunk size for Azure repositories\r\nto be the same as the maximum chunk size. This commit also\r\nadds tests for both the Azure and Google Cloud repositories to\r\nensure only valid chunk sizes can be set.\r\n\r\nCloses #22513", "number": 22577, "review_comments": [ { "body": "I don't understand the value of this assertion given the assertion on the previous line?", "created_at": "2017-01-12T12:38:34Z" }, { "body": "Same question here?", "created_at": "2017-01-12T12:39:28Z" }, { "body": "Good catch, this is sort of left over as the assertion is wrong (the chunk size should always be > 0, not >= 0). I'll remove this and the one below", "created_at": "2017-01-12T13:55:36Z" } ], "title": "Fixes default chunk size for Azure repositories" }
{ "commits": [ { "message": "Fixes default chunk size for Azure repositories\n\nBefore, the default chunk size for Azure repositories was\n-1 bytes, which meant that if the chunk_size was not set on\nthe Azure repository, nor as a node setting, then no data\nfiles would get written as part of the snapshot (because\nthe BlobStoreRepository's PartSliceStream does not know\nhow to process negative chunk sizes).\n\nThis commit fixes the default chunk size for Azure repositories\nto be the same as the maximum chunk size. This commit also\nadds tests for both the Azure and Google Cloud repositories to\nensure only valid chunk sizes can be set." }, { "message": "removes unneeded assertion" } ], "files": [ { "diff": "@@ -26,6 +26,7 @@\n import org.elasticsearch.common.settings.Setting;\n import org.elasticsearch.common.settings.Setting.Property;\n import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.unit.ByteSizeUnit;\n import org.elasticsearch.common.unit.ByteSizeValue;\n import org.elasticsearch.common.unit.TimeValue;\n \n@@ -42,6 +43,9 @@\n */\n public interface AzureStorageService {\n \n+ ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES);\n+ ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(64, ByteSizeUnit.MB);\n+\n final class Storage {\n public static final String PREFIX = \"cloud.azure.storage.\";\n \n@@ -58,7 +62,7 @@ final class Storage {\n public static final Setting<String> LOCATION_MODE_SETTING =\n Setting.simpleString(\"repositories.azure.location_mode\", Property.NodeScope);\n public static final Setting<ByteSizeValue> CHUNK_SIZE_SETTING =\n- Setting.byteSizeSetting(\"repositories.azure.chunk_size\", new ByteSizeValue(-1), Property.NodeScope);\n+ Setting.byteSizeSetting(\"repositories.azure.chunk_size\", MAX_CHUNK_SIZE, MIN_CHUNK_SIZE, MAX_CHUNK_SIZE, Property.NodeScope);\n public static final Setting<Boolean> COMPRESS_SETTING =\n Setting.boolSetting(\"repositories.azure.compress\", false, Property.NodeScope);\n }", "filename": "plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java", "status": "modified" }, { "diff": "@@ -40,15 +40,14 @@\n import org.elasticsearch.common.blobstore.BlobStore;\n import org.elasticsearch.common.settings.Setting;\n import org.elasticsearch.common.settings.Setting.Property;\n-import org.elasticsearch.common.settings.SettingsException;\n-import org.elasticsearch.common.unit.ByteSizeUnit;\n import org.elasticsearch.common.unit.ByteSizeValue;\n import org.elasticsearch.env.Environment;\n import org.elasticsearch.repositories.RepositoryVerificationException;\n import org.elasticsearch.repositories.blobstore.BlobStoreRepository;\n import org.elasticsearch.snapshots.SnapshotCreationException;\n \n-import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.getEffectiveSetting;\n+import static org.elasticsearch.cloud.azure.storage.AzureStorageService.MAX_CHUNK_SIZE;\n+import static org.elasticsearch.cloud.azure.storage.AzureStorageService.MIN_CHUNK_SIZE;\n import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.getValue;\n \n /**\n@@ -64,8 +63,6 @@\n */\n public class AzureRepository extends BlobStoreRepository {\n \n- private static final ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(64, ByteSizeUnit.MB);\n-\n public static final String TYPE = \"azure\";\n \n public static final class Repository {\n@@ -75,7 +72,7 @@ public static final class Repository {\n public static final Setting<String> BASE_PATH_SETTING = 
Setting.simpleString(\"base_path\", Property.NodeScope);\n public static final Setting<String> LOCATION_MODE_SETTING = Setting.simpleString(\"location_mode\", Property.NodeScope);\n public static final Setting<ByteSizeValue> CHUNK_SIZE_SETTING =\n- Setting.byteSizeSetting(\"chunk_size\", MAX_CHUNK_SIZE, Property.NodeScope);\n+ Setting.byteSizeSetting(\"chunk_size\", MAX_CHUNK_SIZE, MIN_CHUNK_SIZE, MAX_CHUNK_SIZE, Property.NodeScope);\n public static final Setting<Boolean> COMPRESS_SETTING = Setting.boolSetting(\"compress\", false, Property.NodeScope);\n }\n \n@@ -92,14 +89,7 @@ public AzureRepository(RepositoryMetaData metadata, Environment environment,\n \n blobStore = new AzureBlobStore(metadata, environment.settings(), storageService);\n String container = getValue(metadata.settings(), settings, Repository.CONTAINER_SETTING, Storage.CONTAINER_SETTING);\n- ByteSizeValue configuredChunkSize = getValue(metadata.settings(), settings, Repository.CHUNK_SIZE_SETTING, Storage.CHUNK_SIZE_SETTING);\n- if (configuredChunkSize.getMb() > MAX_CHUNK_SIZE.getMb()) {\n- Setting<ByteSizeValue> setting = getEffectiveSetting(metadata.settings(), Repository.CHUNK_SIZE_SETTING, Storage.CHUNK_SIZE_SETTING);\n- throw new SettingsException(\"[\" + setting.getKey() + \"] must not exceed [\" + MAX_CHUNK_SIZE + \"] but is set to [\" + configuredChunkSize + \"].\");\n- } else {\n- this.chunkSize = configuredChunkSize;\n- }\n-\n+ this.chunkSize = getValue(metadata.settings(), settings, Repository.CHUNK_SIZE_SETTING, Storage.CHUNK_SIZE_SETTING);\n this.compress = getValue(metadata.settings(), settings, Repository.COMPRESS_SETTING, Storage.COMPRESS_SETTING);\n String modeStr = getValue(metadata.settings(), settings, Repository.LOCATION_MODE_SETTING, Storage.LOCATION_MODE_SETTING);\n Boolean forcedReadonly = metadata.settings().getAsBoolean(\"readonly\", null);", "filename": "plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java", "status": "modified" }, { "diff": "@@ -21,24 +21,19 @@\n \n import com.microsoft.azure.storage.LocationMode;\n import com.microsoft.azure.storage.StorageException;\n-import com.microsoft.azure.storage.blob.CloudBlobClient;\n import org.elasticsearch.cloud.azure.storage.AzureStorageService;\n-import org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl;\n-import org.elasticsearch.cloud.azure.storage.AzureStorageSettings;\n import org.elasticsearch.cluster.metadata.RepositoryMetaData;\n import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.unit.ByteSizeUnit;\n+import org.elasticsearch.common.unit.ByteSizeValue;\n import org.elasticsearch.common.xcontent.NamedXContentRegistry;\n import org.elasticsearch.env.Environment;\n-import org.elasticsearch.env.NodeEnvironment;\n import org.elasticsearch.test.ESTestCase;\n \n import java.io.IOException;\n-import java.net.URI;\n import java.net.URISyntaxException;\n \n-import static org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl.blobNameFromUri;\n import static org.hamcrest.Matchers.is;\n-import static org.hamcrest.Matchers.nullValue;\n \n public class AzureRepositorySettingsTests extends ESTestCase {\n \n@@ -103,4 +98,30 @@ public void testReadonlyWithPrimaryAndSecondaryOnlyAndReadonlyOff() throws Stora\n .put(\"readonly\", false)\n .build()).isReadOnly(), is(false));\n }\n+\n+ public void testChunkSize() throws StorageException, IOException, URISyntaxException {\n+ // default chunk size\n+ AzureRepository azureRepository = azureRepository(Settings.EMPTY);\n+ 
assertEquals(AzureStorageService.MAX_CHUNK_SIZE, azureRepository.chunkSize());\n+\n+ // chunk size in settings\n+ int size = randomIntBetween(1, 64);\n+ azureRepository = azureRepository(Settings.builder().put(\"chunk_size\", size + \"mb\").build());\n+ assertEquals(new ByteSizeValue(size, ByteSizeUnit.MB), azureRepository.chunkSize());\n+\n+ // zero bytes is not allowed\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->\n+ azureRepository(Settings.builder().put(\"chunk_size\", \"0\").build()));\n+ assertEquals(\"Failed to parse value [0] for setting [chunk_size] must be >= 1b\", e.getMessage());\n+\n+ // negative bytes not allowed\n+ e = expectThrows(IllegalArgumentException.class, () ->\n+ azureRepository(Settings.builder().put(\"chunk_size\", \"-1\").build()));\n+ assertEquals(\"Failed to parse value [-1] for setting [chunk_size] must be >= 1b\", e.getMessage());\n+\n+ // greater than max chunk size not allowed\n+ e = expectThrows(IllegalArgumentException.class, () ->\n+ azureRepository(Settings.builder().put(\"chunk_size\", \"65mb\").build()));\n+ assertEquals(\"Failed to parse value [65mb] for setting [chunk_size] must be <= 64mb\", e.getMessage());\n+ }\n }", "filename": "plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java", "status": "modified" }, { "diff": "@@ -46,6 +46,10 @@\n \n public class GoogleCloudStorageRepository extends BlobStoreRepository {\n \n+ // package private for testing\n+ static final ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES);\n+ static final ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(100, ByteSizeUnit.MB);\n+\n public static final String TYPE = \"gcs\";\n \n public static final TimeValue NO_TIMEOUT = timeValueMillis(-1);\n@@ -57,7 +61,7 @@ public class GoogleCloudStorageRepository extends BlobStoreRepository {\n public static final Setting<Boolean> COMPRESS =\n boolSetting(\"compress\", false, Property.NodeScope, Property.Dynamic);\n public static final Setting<ByteSizeValue> CHUNK_SIZE =\n- byteSizeSetting(\"chunk_size\", new ByteSizeValue(100, ByteSizeUnit.MB), Property.NodeScope, Property.Dynamic);\n+ byteSizeSetting(\"chunk_size\", MAX_CHUNK_SIZE, MIN_CHUNK_SIZE, MAX_CHUNK_SIZE, Property.NodeScope, Property.Dynamic);\n public static final Setting<String> APPLICATION_NAME =\n new Setting<>(\"application_name\", GoogleCloudStoragePlugin.NAME, Function.identity(), Property.NodeScope, Property.Dynamic);\n public static final Setting<String> SERVICE_ACCOUNT =\n@@ -77,9 +81,9 @@ public GoogleCloudStorageRepository(RepositoryMetaData metadata, Environment env\n GoogleCloudStorageService storageService) throws Exception {\n super(metadata, environment.settings(), namedXContentRegistry);\n \n- String bucket = get(BUCKET, metadata);\n- String application = get(APPLICATION_NAME, metadata);\n- String serviceAccount = get(SERVICE_ACCOUNT, metadata);\n+ String bucket = getSetting(BUCKET, metadata);\n+ String application = getSetting(APPLICATION_NAME, metadata);\n+ String serviceAccount = getSetting(SERVICE_ACCOUNT, metadata);\n \n String basePath = BASE_PATH.get(metadata.settings());\n if (Strings.hasLength(basePath)) {\n@@ -105,8 +109,8 @@ public GoogleCloudStorageRepository(RepositoryMetaData metadata, Environment env\n readTimeout = timeout;\n }\n \n- this.compress = get(COMPRESS, metadata);\n- this.chunkSize = get(CHUNK_SIZE, metadata);\n+ this.compress = getSetting(COMPRESS, metadata);\n+ this.chunkSize = getSetting(CHUNK_SIZE, metadata);\n \n 
logger.debug(\"using bucket [{}], base_path [{}], chunk_size [{}], compress [{}], application [{}]\",\n bucket, basePath, chunkSize, compress, application);\n@@ -139,7 +143,7 @@ protected ByteSizeValue chunkSize() {\n /**\n * Get a given setting from the repository settings, throwing a {@link RepositoryException} if the setting does not exist or is empty.\n */\n- static <T> T get(Setting<T> setting, RepositoryMetaData metadata) {\n+ static <T> T getSetting(Setting<T> setting, RepositoryMetaData metadata) {\n T value = setting.get(metadata.settings());\n if (value == null) {\n throw new RepositoryException(metadata.name(), \"Setting [\" + setting.getKey() + \"] is not defined for repository\");", "filename": "plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java", "status": "modified" }, { "diff": "@@ -20,9 +20,11 @@\n package org.elasticsearch.repositories.gcs;\n \n import com.google.api.services.storage.Storage;\n+import org.elasticsearch.cluster.metadata.RepositoryMetaData;\n import org.elasticsearch.common.blobstore.gcs.MockHttpTransport;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.unit.ByteSizeUnit;\n+import org.elasticsearch.common.unit.ByteSizeValue;\n import org.elasticsearch.common.unit.TimeValue;\n import org.elasticsearch.env.Environment;\n import org.elasticsearch.plugin.repository.gcs.GoogleCloudStoragePlugin;\n@@ -80,4 +82,42 @@ public Storage createClient(String serviceAccount, String application, TimeValue\n return storage.get();\n }\n }\n+\n+ public void testChunkSize() {\n+ // default chunk size\n+ RepositoryMetaData repositoryMetaData = new RepositoryMetaData(\"repo\", GoogleCloudStorageRepository.TYPE, Settings.EMPTY);\n+ ByteSizeValue chunkSize = GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repositoryMetaData);\n+ assertEquals(GoogleCloudStorageRepository.MAX_CHUNK_SIZE, chunkSize);\n+\n+ // chunk size in settings\n+ int size = randomIntBetween(1, 100);\n+ repositoryMetaData = new RepositoryMetaData(\"repo\", GoogleCloudStorageRepository.TYPE,\n+ Settings.builder().put(\"chunk_size\", size + \"mb\").build());\n+ chunkSize = GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repositoryMetaData);\n+ assertEquals(new ByteSizeValue(size, ByteSizeUnit.MB), chunkSize);\n+\n+ // zero bytes is not allowed\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {\n+ RepositoryMetaData repoMetaData = new RepositoryMetaData(\"repo\", GoogleCloudStorageRepository.TYPE,\n+ Settings.builder().put(\"chunk_size\", \"0\").build());\n+ GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData);\n+ });\n+ assertEquals(\"Failed to parse value [0] for setting [chunk_size] must be >= 1b\", e.getMessage());\n+\n+ // negative bytes not allowed\n+ e = expectThrows(IllegalArgumentException.class, () -> {\n+ RepositoryMetaData repoMetaData = new RepositoryMetaData(\"repo\", GoogleCloudStorageRepository.TYPE,\n+ Settings.builder().put(\"chunk_size\", \"-1\").build());\n+ GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData);\n+ });\n+ assertEquals(\"Failed to parse value [-1] for setting [chunk_size] must be >= 1b\", e.getMessage());\n+\n+ // greater than max chunk size not allowed\n+ e = expectThrows(IllegalArgumentException.class, () -> {\n+ RepositoryMetaData repoMetaData = new RepositoryMetaData(\"repo\", 
GoogleCloudStorageRepository.TYPE,\n+ Settings.builder().put(\"chunk_size\", \"101mb\").build());\n+ GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData);\n+ });\n+ assertEquals(\"Failed to parse value [101mb] for setting [chunk_size] must be <= 100mb\", e.getMessage());\n+ }\n }", "filename": "plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java", "status": "modified" } ] }
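The change above replaces the old unbounded default (-1 bytes) with a chunk_size setting clamped between 1 byte and 64mb for Azure (100mb for GCS). The following standalone sketch does not use the real org.elasticsearch.common.settings.Setting API; the class and method names are invented for illustration, and only the bounds and the "default equals maximum" behaviour come from the diff.

```
// Standalone sketch of the bounded chunk_size behaviour introduced by this PR:
// out-of-range values are rejected instead of silently defaulting to -1 bytes.
public class ChunkSizeSketch {

    static final long MIN_CHUNK_BYTES = 1L;                 // 1 byte
    static final long MAX_CHUNK_BYTES = 64L * 1024 * 1024;  // 64 MB (Azure maximum)

    /** Returns the effective chunk size, falling back to the maximum when unset. */
    static long effectiveChunkSize(Long configuredBytes) {
        if (configuredBytes == null) {
            return MAX_CHUNK_BYTES;                          // new default: the maximum, not -1
        }
        if (configuredBytes < MIN_CHUNK_BYTES || configuredBytes > MAX_CHUNK_BYTES) {
            throw new IllegalArgumentException("chunk_size must be between " + MIN_CHUNK_BYTES
                + "b and " + MAX_CHUNK_BYTES + "b, got " + configuredBytes);
        }
        return configuredBytes;
    }

    public static void main(String[] args) {
        System.out.println(effectiveChunkSize(null));              // 67108864 (64 MB default)
        System.out.println(effectiveChunkSize(16L * 1024 * 1024)); // explicit 16 MB is accepted
        try {
            effectiveChunkSize(-1L);                               // the old default is now rejected
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
```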
{ "body": "In all versions of Elasticsearch where I've checked the source code, there's a bug in the error message for when a shard can't recover from the primary because the number of docs differ.\r\n\r\nIt lists numDocsTarget as the doc count for the source node, and numDocsSource as the doc count for the target node, but it should be the other way around. Since the only way I've found to fix these problems is to remove the shard that we've deemed to be wrong, this can be really dangerous and cause removal of the wrong shard.\r\n\r\nRelevant line of RecoverySourceHandler.java:\r\n\r\n> if (numDocsTarget != numDocsSource) {\r\n> throw new IllegalStateException(\"try to recover \" + request.shardId() + \" from primary shard with sync id but number of docs differ: \" + **numDocsTarget** + \" (\" + **request.sourceNode().getName()** + \", primary) vs \" + **numDocsSource** + \"(\" + **request.targetNode().getName()** + \")\");\r\n> }\r\n", "comments": [ { "body": "looks like a bug, thanks for raising this. do you wanna open a PR for this?", "created_at": "2016-11-30T21:35:04Z" }, { "body": "can you tell how you ran into this exception and what version you are using?", "created_at": "2016-11-30T21:36:03Z" }, { "body": "Sure, be happy to open a PR. We're currently using version 1.7, working on upgrading.\r\n\r\nAfter a node restarted, one of the shards failed to initialize with the error message I listed above. I'm guessing that a write was dropped on the replica at some earlier point, even though the overall operation reported success (if this is a known issue that is fixed in later versions, I'd be very interested to hear it). To fix this, we removed the shard files from the node reporting the wrong doc count and let it recover from the primary.\r\n\r\nWhen checking doc counts with /_cat/shards, I noticed that the doc count didn't match up with the error message. Checked on the source code to see where the number was coming from and found the bug.", "created_at": "2016-11-30T22:18:08Z" } ], "number": 21893, "title": "RecoverySourceHandler error message has source and target doc counts switched" }
{ "body": "There is a bug in the error message that is thrown if the number of docs differs between the source and target shards when recovering a shard with a syncId. The source and target doc counts are swapped around.\r\n\r\nCloses #21893", "number": 22515, "review_comments": [], "title": "Fixing shard recovery error message to report the number of docs correctly for each node" }
{ "commits": [ { "message": "Fixing the error message to report the number of docs correctly for each node" } ], "files": [ { "diff": "@@ -222,7 +222,7 @@ public void phase1(final IndexCommit snapshot, final Translog.View translogView)\n final long numDocsSource = recoverySourceMetadata.getNumDocs();\n if (numDocsTarget != numDocsSource) {\n throw new IllegalStateException(\"try to recover \" + request.shardId() + \" from primary shard with sync id but number \" +\n- \"of docs differ: \" + numDocsTarget + \" (\" + request.sourceNode().getName() + \", primary) vs \" + numDocsSource\n+ \"of docs differ: \" + numDocsSource + \" (\" + request.sourceNode().getName() + \", primary) vs \" + numDocsTarget\n + \"(\" + request.targetNode().getName() + \")\");\n }\n // we shortcut recovery here because we have nothing to copy. but we must still start the engine on the target.", "filename": "core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java", "status": "modified" } ] }
{ "body": "5.1.1\r\n\r\nHave a test index with the following document in 1 cluster:\r\n\r\n```\r\nPOST test/type\r\n{\r\n \"test1\":\"test1\",\r\n \"test2\":\"test2\"\r\n}\r\n```\r\n\r\nWhen using reindex api (local), it successfully excludes field test2 from the destination index.\r\n\r\n```\r\nPOST /_reindex\r\n{\r\n \"source\": {\r\n \"index\": \"test\",\r\n \"type\": \"type\",\r\n \"_source\": {\r\n \"excludes\": [\r\n \"test2\"\r\n ]\r\n }\r\n },\r\n \"dest\": {\r\n \"index\": \"test2\",\r\n \"type\": \"type\"\r\n }\r\n}\r\n```\r\n\r\n```\r\n{\r\n \"took\": 1,\r\n \"timed_out\": false,\r\n \"_shards\": {\r\n \"total\": 5,\r\n \"successful\": 5,\r\n \"failed\": 0\r\n },\r\n \"hits\": {\r\n \"total\": 1,\r\n \"max_score\": 1,\r\n \"hits\": [\r\n {\r\n \"_index\": \"test2\",\r\n \"_type\": \"type\",\r\n \"_id\": \"AVmBwRozFIrGdyiOxk9x\",\r\n \"_score\": 1,\r\n \"_source\": {\r\n \"test1\": \"test1\"\r\n }\r\n }\r\n ]\r\n }\r\n}\r\n```\r\n\r\nWhen using reindex remote:\r\n\r\n```\r\ncurl -u elastic:changeme -XPOST \"http://localhost:9999/_reindex\" -d '{\r\n \"source\": {\r\n \"remote\":{\"host\":\"http://localhost:9200\",\"username\":\"elastic\",\r\n \"password\":\"changeme\"},\r\n \"index\": \"test\",\r\n \"type\": \"type\",\r\n \"_source\": {\r\n \"excludes\": [\r\n \"test2\"\r\n ]\r\n }\r\n },\r\n \"dest\": {\r\n \"index\": \"test\",\r\n \"type\": \"type\"\r\n }\r\n}'\r\n```\r\n\r\ntest2 field remains in the resulting index:\r\n\r\n```\r\ncurl -u elastic:changeme http://localhost:9999/test/_search?pretty\r\n{\r\n \"took\" : 1,\r\n \"timed_out\" : false,\r\n \"_shards\" : {\r\n \"total\" : 5,\r\n \"successful\" : 5,\r\n \"failed\" : 0\r\n },\r\n \"hits\" : {\r\n \"total\" : 1,\r\n \"max_score\" : 1.0,\r\n \"hits\" : [\r\n {\r\n \"_index\" : \"test\",\r\n \"_type\" : \"type\",\r\n \"_id\" : \"AVmBwRozFIrGdyiOxk9x\",\r\n \"_score\" : 1.0,\r\n \"_source\" : {\r\n \"test1\" : \"test1\",\r\n \"test2\" : \"test2\"\r\n }\r\n }\r\n ]\r\n }\r\n}\r\n```", "comments": [], "number": 22507, "title": "Reindex from remote and source filtering" }
{ "body": "Reindex-from-remote was accepting source filtering in the request\r\nbut ignoring it and setting `_source=true` on the search URI. This\r\nfixes the filtering so it is piped through to the remote node and\r\nadds tests for that.\r\n\r\nCloses #22507\r\n", "number": 22514, "review_comments": [], "title": "Fix source filtering in reindex-from-remote" }
{ "commits": [ { "message": "Fix source filtering in reindex-from-remote\n\nReindex-from-remote was accepting source filtering in the request\nbut ignoring it and setting `_source=true` on the search URI. This\nfixes the filtering so it is piped through to the remote node and\nadds tests for that.\n\nCloses #22507" } ], "files": [ { "diff": "@@ -71,6 +71,9 @@ public ActionRequestValidationException validate() {\n if (getSearchRequest().indices() == null || getSearchRequest().indices().length == 0) {\n e = addValidationError(\"use _all if you really want to copy from all existing indexes\", e);\n }\n+ if (getSearchRequest().source().fetchSource() != null && getSearchRequest().source().fetchSource().fetchSource() == false) {\n+ e = addValidationError(\"_source:false is not supported in this context\", e);\n+ }\n /*\n * Note that we don't call index's validator - it won't work because\n * we'll be filling in portions of it as we receive the docs. But we can", "filename": "modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java", "status": "modified" }, { "diff": "@@ -102,27 +102,30 @@ static Map<String, String> initialSearchParams(SearchRequest searchRequest, Vers\n String storedFieldsParamName = remoteVersion.before(Version.V_5_0_0_alpha4) ? \"fields\" : \"stored_fields\";\n params.put(storedFieldsParamName, fields.toString());\n }\n- // We always want the _source document and this will force it to be returned.\n- params.put(\"_source\", \"true\");\n return params;\n }\n \n- static HttpEntity initialSearchEntity(BytesReference query) {\n+ static HttpEntity initialSearchEntity(SearchRequest searchRequest, BytesReference query) {\n // EMPTY is safe here because we're not calling namedObject\n try (XContentBuilder entity = JsonXContent.contentBuilder();\n XContentParser queryParser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, query)) {\n entity.startObject();\n- entity.field(\"query\");\n- /*\n- * We're intentionally a bit paranoid here - copying the query as xcontent rather than writing a raw field. We don't want poorly\n- * written queries to escape. Ever.\n- */\n- entity.copyCurrentStructure(queryParser);\n- XContentParser.Token shouldBeEof = queryParser.nextToken();\n- if (shouldBeEof != null) {\n- throw new ElasticsearchException(\n- \"query was more than a single object. This first token after the object is [\" + shouldBeEof + \"]\");\n+\n+ entity.field(\"query\"); {\n+ /* We're intentionally a bit paranoid here - copying the query as xcontent rather than writing a raw field. We don't want\n+ * poorly written queries to escape. Ever. */\n+ entity.copyCurrentStructure(queryParser);\n+ XContentParser.Token shouldBeEof = queryParser.nextToken();\n+ if (shouldBeEof != null) {\n+ throw new ElasticsearchException(\n+ \"query was more than a single object. This first token after the object is [\" + shouldBeEof + \"]\");\n+ }\n }\n+\n+ if (searchRequest.source().fetchSource() != null) {\n+ entity.field(\"_source\", searchRequest.source().fetchSource());\n+ }\n+\n entity.endObject();\n BytesRef bytes = entity.bytes().toBytesRef();\n return new ByteArrayEntity(bytes.bytes, bytes.offset, bytes.length, ContentType.APPLICATION_JSON);", "filename": "modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java", "status": "modified" }, { "diff": "@@ -101,7 +101,7 @@ protected void doStart(Consumer<? 
super Response> onResponse) {\n lookupRemoteVersion(version -> {\n remoteVersion = version;\n execute(\"POST\", initialSearchPath(searchRequest), initialSearchParams(searchRequest, version),\n- initialSearchEntity(query), RESPONSE_PARSER, r -> onStartResponse(onResponse, r));\n+ initialSearchEntity(searchRequest, query), RESPONSE_PARSER, r -> onStartResponse(onResponse, r));\n });\n }\n ", "filename": "modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java", "status": "modified" }, { "diff": "@@ -152,20 +152,29 @@ public void testInitialSearchParamsMisc() {\n assertThat(params, scroll == null ? not(hasKey(\"scroll\")) : hasEntry(\"scroll\", scroll.toString()));\n assertThat(params, hasEntry(\"size\", Integer.toString(size)));\n assertThat(params, fetchVersion == null || fetchVersion == true ? hasEntry(\"version\", null) : not(hasEntry(\"version\", null)));\n- assertThat(params, hasEntry(\"_source\", \"true\"));\n }\n \n public void testInitialSearchEntity() throws IOException {\n+ SearchRequest searchRequest = new SearchRequest();\n+ searchRequest.source(new SearchSourceBuilder());\n String query = \"{\\\"match_all\\\":{}}\";\n- HttpEntity entity = initialSearchEntity(new BytesArray(query));\n+ HttpEntity entity = initialSearchEntity(searchRequest, new BytesArray(query));\n assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue());\n assertEquals(\"{\\\"query\\\":\" + query + \"}\",\n Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8)));\n \n+ // Source filtering is included if set up\n+ searchRequest.source().fetchSource(new String[] {\"in1\", \"in2\"}, new String[] {\"out\"});\n+ entity = initialSearchEntity(searchRequest, new BytesArray(query));\n+ assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue());\n+ assertEquals(\"{\\\"query\\\":\" + query + \",\\\"_source\\\":{\\\"includes\\\":[\\\"in1\\\",\\\"in2\\\"],\\\"excludes\\\":[\\\"out\\\"]}}\",\n+ Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8)));\n+\n // Invalid XContent fails\n- RuntimeException e = expectThrows(RuntimeException.class, () -> initialSearchEntity(new BytesArray(\"{}, \\\"trailing\\\": {}\")));\n+ RuntimeException e = expectThrows(RuntimeException.class,\n+ () -> initialSearchEntity(searchRequest, new BytesArray(\"{}, \\\"trailing\\\": {}\")));\n assertThat(e.getCause().getMessage(), containsString(\"Unexpected character (',' (code 44))\"));\n- e = expectThrows(RuntimeException.class, () -> initialSearchEntity(new BytesArray(\"{\")));\n+ e = expectThrows(RuntimeException.class, () -> initialSearchEntity(searchRequest, new BytesArray(\"{\")));\n assertThat(e.getCause().getMessage(), containsString(\"Unexpected end-of-input\"));\n }\n ", "filename": "modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java", "status": "modified" }, { "diff": "@@ -416,3 +416,42 @@\n type: foo\n id: 1\n - match: { _source: {} }\n+\n+---\n+\"Reindex with source filtering\":\n+ - do:\n+ index:\n+ index: source\n+ type: foo\n+ id: 1\n+ body: { \"text\": \"test\", \"filtered\": \"removed\" }\n+ refresh: true\n+\n+ - do:\n+ reindex:\n+ refresh: true\n+ body:\n+ source:\n+ index: source\n+ _source:\n+ excludes:\n+ - filtered\n+ dest:\n+ index: dest\n+ - match: {created: 1}\n+ - match: {updated: 0}\n+ - match: {version_conflicts: 0}\n+ - match: {batches: 1}\n+ - match: {failures: []}\n+ - match: {throttled_millis: 
0}\n+ - gte: { took: 0 }\n+ - is_false: task\n+ - is_false: deleted\n+\n+ - do:\n+ get:\n+ index: dest\n+ type: foo\n+ id: 1\n+ - match: { _source.text: \"test\" }\n+ - is_false: _source.filtered", "filename": "modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml", "status": "modified" }, { "diff": "@@ -295,3 +295,21 @@\n index: test\n dest:\n index: dest\n+\n+---\n+\"_source:false is rejected\":\n+ - do:\n+ index:\n+ index: source\n+ type: foo\n+ id: 1\n+ body: { \"text\": \"test\" }\n+ - do:\n+ catch: /_source:false is not supported in this context/\n+ reindex:\n+ body:\n+ source:\n+ index: source\n+ _source: false\n+ dest:\n+ index: dest", "filename": "modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yaml", "status": "modified" }, { "diff": "@@ -311,3 +311,53 @@\n index: source\n dest:\n index: dest\n+\n+---\n+\"Reindex from remote with source filtering\":\n+ - do:\n+ index:\n+ index: source\n+ type: foo\n+ id: 1\n+ body: { \"text\": \"test\", \"filtered\": \"removed\" }\n+ refresh: true\n+\n+ # Fetch the http host. We use the host of the master because we know there will always be a master.\n+ - do:\n+ cluster.state: {}\n+ - set: { master_node: master }\n+ - do:\n+ nodes.info:\n+ metric: [ http ]\n+ - is_true: nodes.$master.http.publish_address\n+ - set: {nodes.$master.http.publish_address: host}\n+ - do:\n+ reindex:\n+ refresh: true\n+ body:\n+ source:\n+ remote:\n+ host: http://${host}\n+ index: source\n+ _source:\n+ excludes:\n+ - filtered\n+ dest:\n+ index: dest\n+ - match: {created: 1}\n+ - match: {updated: 0}\n+ - match: {version_conflicts: 0}\n+ - match: {batches: 1}\n+ - match: {failures: []}\n+ - match: {throttled_millis: 0}\n+ - gte: { took: 0 }\n+ - is_false: task\n+ - is_false: deleted\n+\n+ - do:\n+ get:\n+ index: dest\n+ type: foo\n+ id: 1\n+ - match: { _source.text: \"test\" }\n+ - is_false: _source.filtered", "filename": "modules/reindex/src/test/resources/rest-api-spec/test/reindex/90_remote.yaml", "status": "modified" } ] }
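A rough standalone sketch of the search body shape the fixed reindex-from-remote now sends to the remote cluster: the user's query plus an optional `_source` section, matching the string asserted in RemoteRequestBuildersTests. It does not use the real XContentBuilder API, and the helper names are invented for illustration.

```
import java.util.List;

// Builds the remote search body with optional source filtering instead of
// forcing "_source=true" on the search URI (the pre-fix behaviour).
public class RemoteSearchBodySketch {

    static String searchBody(String queryJson, List<String> includes, List<String> excludes) {
        StringBuilder body = new StringBuilder();
        body.append("{\"query\":").append(queryJson);
        if (!includes.isEmpty() || !excludes.isEmpty()) {
            body.append(",\"_source\":{")
                .append("\"includes\":").append(toJsonArray(includes)).append(',')
                .append("\"excludes\":").append(toJsonArray(excludes))
                .append('}');
        }
        return body.append('}').toString();
    }

    private static String toJsonArray(List<String> values) {
        StringBuilder sb = new StringBuilder("[");
        for (int i = 0; i < values.size(); i++) {
            if (i > 0) sb.append(',');
            sb.append('"').append(values.get(i)).append('"');
        }
        return sb.append(']').toString();
    }

    public static void main(String[] args) {
        // Prints the same shape the test expects:
        // {"query":{"match_all":{}},"_source":{"includes":["in1","in2"],"excludes":["out"]}}
        System.out.println(searchBody("{\"match_all\":{}}", List.of("in1", "in2"), List.of("out")));
    }
}
```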
{ "body": "I tried to negates a keyword at query but it already comes in result\n\nsimple query is \n\"query\": {\n \"simple_query_string\": {\n \"query\": \"\\\"This repository\\\" -removed\",\n \"fields\": [\n \"content\",\n \"headline\"\n ]\n }\n },\n", "comments": [ { "body": "Could you provide a complete reproduction that would help us to reproduce the issue? See http://www.elasticsearch.org/help/ for an example.\n", "created_at": "2014-01-13T21:41:42Z" }, { "body": "Recreation:\n\n```\nPUT /t/t/1\n{\n \"content\": \"This repository has been removed\"\n}\n\n\nGET /t/_search\n{\n \"query\": {\n \"simple_query_string\": {\n \"query\": \"+this -removed\",\n \"fields\": [\n \"content\",\n \"headline\"\n ]\n }\n }\n}\n```\n\nThe negated term is ignored, because with `default_operator: \"OR\"`, it is optional.\n", "created_at": "2014-12-24T18:14:50Z" }, { "body": "@clintongormley not sure how this is a bug though? This is like saying \"contains the term **this** OR does not contain **removed**\", which the document does (it contains the term \"this\")\n", "created_at": "2014-12-24T18:59:57Z" }, { "body": "I think that a user would expect `foo bar -baz` (with default operator `OR`) to match as if they had written: `+(foo bar) -baz`. Otherwise, a negated clause with default operator `OR` is meaningless.\n", "created_at": "2014-12-29T10:19:50Z" }, { "body": "@clintongormley I'm still not sure that this is actually a bug, the negated clause with the default `OR` operation is not meaningless (it has meaning, it just means\" anything that does not contain this\"). I think what is desired is setting `default_operator` to `AND`, which is a user-changeable setting?\n", "created_at": "2015-02-23T23:20:12Z" }, { "body": "@dakrone @clintongormley Here is another [use case](https://gist.github.com/ppf2/e56c34151113115c1266) from the field. Is this a bug or not currently supported (a feature)? Don't think default_operator:AND will help here since the use case is to return both documents in the example. thx\n", "created_at": "2015-08-19T04:55:28Z" }, { "body": "@ppf2 Actually, your example works as expected if you use the `simple_query_string` (you used the `query_string`query instead). As a query string query, you would need to change:\n\n```\n\"(name:Android) OR NOT (status:approved)\"\n```\n\nto:\n\n```\n\"(name:Android) OR (NOT status:approved)\"\n```\n\nThe latter, when run through validate-query, shows the following:\n\n```\n\"name:android (-status:approved +*:*)\"\n```\n", "created_at": "2015-08-25T08:25:49Z" }, { "body": "Ah got it, thx @clintongormley for the tip!\n", "created_at": "2015-08-25T17:03:24Z" }, { "body": "Taking a look.\n", "created_at": "2015-10-14T15:32:36Z" }, { "body": "This is working as expected as @dakrone has inferred. The SimpleQueryParser should be thought of as only using AND and OR as operators. There is no concept of SHOULD and MUST other than internally to create the AND and OR queries. So when doing the query \"+this -removed\" the AND (+) is actually ignored as it is not thought of as a MUST. Using SimpleQueryParser this will always be the case where the query ends up being documents that either have 'this' OR not 'removed' ... Also note, that while this will return all the documents, the not 'removed' still affects scoring so it's not meaningless. Going to leave this open for now for further discussion if necessary.\n", "created_at": "2015-10-14T16:54:37Z" }, { "body": "While it may be working as designed, I'd argue that the syntax is surprising to most users. 
For example:\n\n```\nPOST t/t/_bulk\n{\"index\":{\"_id\":1}}\n{\"foo\":\"one\"}\n{\"index\":{\"_id\":2}}\n{\"foo\":\"two\"}\n{\"index\":{\"_id\":3}}\n{\"foo\":\"one two\"}\n{\"index\":{\"_id\":4}}\n{\"foo\":\"three\"}\n```\n\nI would expect the following:\n- `\"one\"`: Return docs 1 & 3 (works)\n- `\"-two\"`: Return docs 1 & 3 (works)\n- `\"one -two\"`: Return doc 1 (returns 1, 3, & 4)\n- `\"one three -two\"`: Return docs 1 & 4 (returns 1, 3 & 4)\n\nTo get what I want (ie \"Give me docs with one or three, but exclude anything with two\") I need to write it as `\"one three +-two\"`. That is not at all intuitive. If I typed `\"windows -microsoft\"` into google, I wouldn't expect Google to return all of the documents on the internet which don't contain the word microsoft.\n\nAt the very least it should be well documented but, given that this query is intended to be exposed to users who will not read documentation, I would say that the syntax could be improved.\n", "created_at": "2015-10-15T11:51:52Z" }, { "body": "> If I typed \"windows -microsoft\" into google, I wouldn't expect Google to return all of the documents on the internet which don't contain the word microsoft.\n\n@clintongormley that's because google (as well as most other search engines in 21st century) is using `AND` instead of `OR` as a default operator, which should be the default behavior for elasticsearch as well IMO. Having `OR` as a default operator is causing all sort of confusion for many new users.\n", "created_at": "2015-10-15T14:24:32Z" }, { "body": "Not really. if you query https://www.google.com/?gws_rd=ssl#q=elasticsearch+reference+query+dsl+oracle it gladly returns high ranking hits and just tells you: Missing: oracle\n\nSwitching to AND breaks many analysis chains such as n-grams. With a good ranking algo its also not necessary, its just that DefaultSimilarity is really weak here.\n", "created_at": "2015-10-15T14:32:44Z" }, { "body": "I agree that this syntax is ugly -- \"one three +-two\" ; however, I am reluctant to special case the not operator because right now you have one OR three OR NOT two which while may be unexpected is predictable, but if I change this it becomes one OR three AND NOT two which is no longer predictable because it ignores the default operator and it loses its consistency. It is also very difficult to predict proper sub queries outside of this simple case. Take for example \"one -three two\" -- is this one AND not three OR two? Do I need to reorder this? I think this would end up being more confusing because of the way operator precedence works in that it's always first come first serve.\n", "created_at": "2015-10-15T15:46:28Z" }, { "body": "What google does, is some weird \"fuzzy\" AND (or something like `should` with large minimum should match) search that google turns on a long tail queries with a large number of terms. But the basic behavior with 2-3 term queries resembles AND more than OR, would you agree?\n\nn-grams is an advanced feature, I think if a user can figure out how to enable n-gram (or configure any other custom analysis chain) they should be able to figure out how to switch from AND to OR in the query.\n\nAnyway, I shouldn't hijack this discussion. I apologize for that. Back to the original topic. 
I think that my expectation would be that `foo bar +baz -qux` should be translated into something like this:\n\n```\n{\n \"bool\": {\n \"should\" : [\n {\n \"term\" : { \"_all\" : \"foo\" }\n }, {\n \"term\" : { \"_all\" : \"bar\" }\n }\n ],\n \"must\" : {\n \"term\" : { \"_all\" : \"baz\" }\n },\n \"must_not\" : {\n \"term\" : { \"_all\" : \"qux\" }\n }\n }\n}\n```\n", "created_at": "2015-10-15T15:51:13Z" }, { "body": "I should explain further what happens right now, each time an AND is switched to an OR or vice versa a new boolean query branch is created. So if you have a b c +d +e f the tree ends up looking like\n\nbq( should bq( should bq( should a should b should c ) must d must e ) should f)\n\nso changing the not operator to always use must will have an inconsistent change in boolean query branches since operator precedence is always left to right.\n\nWe could change it to be something like @imotov suggests (maybe this should be a different parser altogether in Lucene?), but then you have should, must, and must not... if you're truly a basic user I think and/or is easier to understand than should/must/must not.\n", "created_at": "2015-10-15T16:00:30Z" }, { "body": "> I should explain further what happens right now, each time an AND is switched to an OR or vice versa a new boolean query branch is created. \n\nYes, and this is where it breaks my expectation. To me order of elements in the query shouldn't make any difference because \"+\" and \"-\" feel like unary operators but they behave in strange ways.\n", "created_at": "2015-10-15T16:13:24Z" }, { "body": "@imotov What you're saying makes sense to me from the point of view of someone that regularly deals with search, but for someone less technical I think and/or make more sense. Honestly, the default to OR is a bit odd to me too because if someone, say my mother, types \"dog food\" into the google she expects it to be anded together there at least through decent scoring (as you and @rmuir mentioned earlier). I think making a new parser with the behavior of must/should/must not makes sense depending on what our target audience wants. SimpleQueryParser2 or something.\n", "created_at": "2015-10-15T20:08:26Z" }, { "body": "All right after a bit more thought and discussion, I've come to agree with everyone in this issue that this behavior is unexpected for everyone. I'll work on making a Lucene patch for the SimpleQueryParser using the behavior describe by @imotov and @rmuir where the structure will be a single bq per subquery.\n", "created_at": "2015-10-15T21:53:34Z" }, { "body": "@jdconrad did anything ever come of this? Did you open any issue in Lucene that we can track?\n", "created_at": "2016-11-06T11:18:51Z" }, { "body": "@clintongormley Sorry, I must've gotten distracted by other issues before I had anytime to address this. I'll have to take a bit of time to remember what we had discussed.\n", "created_at": "2016-11-07T17:29:42Z" }, { "body": "Let's document and close", "created_at": "2016-12-23T10:23:37Z" }, { "body": "Okay, opened a PR to document this, and then it can be closed.", "created_at": "2017-01-06T23:48:48Z" }, { "body": "Just stumbled on this limitation myself. I'd like to echo @imotov 's suggestion from Oct 15, 2017. (I'll paste below). 
\r\n \r\n> (quote) I think that my expectation would be that `foo bar +baz -qux` should be translated into something like this:\r\n```\r\n{\r\n \"bool\": {\r\n \"should\" : [\r\n {\r\n \"term\" : { \"_all\" : \"foo\" }\r\n }, {\r\n \"term\" : { \"_all\" : \"bar\" }\r\n }\r\n ],\r\n \"must\" : {\r\n \"term\" : { \"_all\" : \"baz\" }\r\n },\r\n \"must_not\" : {\r\n \"term\" : { \"_all\" : \"qux\" }\r\n }\r\n }\r\n}\r\n```\r\n\r\n\r\nThe usecase is based on the previously-mentioned 'google expectation' (or really any major search engine at this point) that can be approximated with a `default_operator: \"OR\"` accompanied by a high `minimum_should_match`.\r\n\r\nThe appeal of `simple_query_string` is in the ability to meet the syntactical needs of users who are familiar with Google operators (without throwing parse exceptions).\r\n\r\nI'm a little bummed that I have to roll my own parser to offer a commonly-accepted negation operator. It's not the end of the world, but adds friction to anyone looking for an otherwise extremely nice (almost turnkey) drop-in query which largely meets common syntax expectations.\r\n\r\n@clintongormley, where is the right forum to re-open this? It looks like this was closed with a doc-comment b/c it belongs in Lucene's JIRA.", "created_at": "2018-05-11T18:16:03Z" } ], "number": 4707, "title": "simple_query_string `-` operator not working with default_operator `OR`" }
{ "body": "This can be confusing when unexpected.\r\n\r\nResolves #4707", "number": 22480, "review_comments": [], "title": "Document simple_query_string negation with default_operator of OR" }
{ "commits": [ { "message": "Document simple_query_string negation with default_operator of OR\n\nThis can be confusing when unexpected.\n\nResolves #4707" } ], "files": [ { "diff": "@@ -83,6 +83,30 @@ The `simple_query_string` supports the following special characters:\n In order to search for any of these special characters, they will need to\n be escaped with `\\`.\n \n+Be aware that this syntax may have a different behavior depending on the\n+`default_operator` value. For example, consider the following query:\n+\n+[source,js]\n+--------------------------------------------------\n+GET /_search\n+{\n+ \"query\": {\n+ \"simple_query_string\" : {\n+ \"fields\" : [\"content\"],\n+ \"query\" : \"foo bar -baz\"\n+ }\n+ }\n+}\n+--------------------------------------------------\n+// CONSOLE\n+\n+You may expect that documents containing only \"foo\" or \"bar\" will be returned,\n+as long as they do not contain \"baz\", however, due to the `default_operator`\n+being OR, this really means \"match documents that contain \"foo\" or documents\n+that contain \"bar\", or documents that don't contain \"baz\". If this is unintended\n+then the query can be switched to `\"foo bar +-baz\"` which will not return\n+documents that contain \"baz\".\n+\n [float]\n ==== Default Field\n When not explicitly specifying the field to search on in the query", "filename": "docs/reference/query-dsl/simple-query-string-query.asciidoc", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: 5.0.0\r\n\r\n**Plugins installed**: none\r\n\r\n**JVM version**: any\r\n\r\n**OS version**: any\r\n\r\n**Description of the problem including expected versus actual behavior**:\r\n\r\nWhen you upgrade elasticsearch with an index using a string field with fielddata disabled from 2.x to 5.x, and you try to shrink it, it fails saying that the index has unsupported parameters: `[fielddata : false]`.\r\n\r\nBasically it looks like when we upgrade an elasticsearch from 2.x to 5.x it changes the following:\r\n\r\n```\r\n{\r\n \"mappings\": {\r\n \"type1\": {\r\n \"properties\": {\r\n \"@version\": {\r\n \"type\": \"string\",\r\n \"index\": \"not_analyzed\",\r\n \"fielddata\": {\r\n \"format\": \"disabled\"\r\n }\r\n }\r\n }\r\n }\r\n }\r\n}\r\n```\r\n\r\nInto a 5.x mapping withe the following structure:\r\n\r\n```\r\n{\r\n \"doc\": {\r\n \"mappings\": {\r\n \"type1\": {\r\n \"properties\": {\r\n \"@version\": {\r\n \"type\": \"string\",\r\n \"index\": \"not_analyzed\",\r\n \"fielddata\": false\r\n }\r\n }\r\n }\r\n }\r\n }\r\n}\r\n```\r\n\r\nThen you try to shrink that index, and since string doesn't exist anymore it doesn't know what to do. We should consider either add a note that it's required to shrink an index that it was created in 5.x, or fix this bug by shrinking old indices as well.\r\n\r\n**Steps to reproduce**:\r\n\r\n[1] Create an index in 2.x with the following:\r\n\r\n```\r\nPUT doc\r\n{\r\n \"mappings\": {\r\n \"type1\": {\r\n \"properties\": {\r\n \"@version\": {\r\n \"type\": \"string\",\r\n \"index\": \"not_analyzed\",\r\n \"fielddata\": {\r\n \"format\": \"disabled\"\r\n }\r\n }\r\n }\r\n }\r\n }\r\n}\r\n```\r\n\r\n[2] Upgrade this elasticsearch instance, or copy the index into the data directory of a 5.x instance so it's picked up as dangling index.\r\n\r\n[3] Verify that the index was upgraded:\r\n\r\n```\r\nGET doc/type1/_mapping\r\n\r\n{\r\n \"doc\": {\r\n \"mappings\": {\r\n \"type1\": {\r\n \"properties\": {\r\n \"@version\": {\r\n \"type\": \"string\",\r\n \"index\": \"not_analyzed\",\r\n \"fielddata\": false\r\n }\r\n }\r\n }\r\n }\r\n }\r\n}\r\n```\r\n\r\n[4] Shrink the index:\r\n\r\n```\r\nPUT doc/_settings\r\n{\r\n \"settings\": {\r\n \"index.blocks.write\": true \r\n }\r\n}\r\n\r\nPOST doc/_shrink/doc_shrinked_index\r\n{\r\n \"settings\": {\r\n \"index.number_of_replicas\": 1,\r\n \"index.number_of_shards\": 1, \r\n \"index.codec\": \"best_compression\" \r\n }\r\n}\r\n```\r\n\r\n[5] Verify logs\r\n\r\n**Provide logs (if relevant)**:\r\n\r\n```\r\n[2016-12-29T16:14:00,908][WARN ][o.e.c.a.s.ShardStateAction] [K-E6Kbx] [doc_shrinked_index][0] received shard failed for shard id [[doc_shrinked_index][0]], allocation id [wbxqZDZSRZ6YCYq2iCIfEQ], primary term [0], message [failed recovery], failure [RecoveryFailedException[[doc_shrinked_index][0]: Recovery failed from null into {K-E6Kbx}{K-E6Kbx4TryyqoNBVUZF-w}{C3ZFU6q-QxK01plsEjaRyQ}{127.0.0.1}{127.0.0.1:9300}{box_type=medium}]; nested: MapperParsingException[Mapping definition for [@version] has unsupported parameters: [fielddata : false]]; ]\r\norg.elasticsearch.indices.recovery.RecoveryFailedException: [doc_shrinked_index][0]: Recovery failed from null into {K-E6Kbx}{K-E6Kbx4TryyqoNBVUZF-w}{C3ZFU6q-QxK01plsEjaRyQ}{127.0.0.1}{127.0.0.1:9300}{box_type=medium}\r\n\tat org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$4(IndexShard.java:1536) [elasticsearch-5.0.0.jar:5.0.0]\r\n\tat org.elasticsearch.index.shard.IndexShard$$Lambda$1653/403275356.run(Unknown Source) 
[elasticsearch-5.0.0.jar:5.0.0]\r\n\tat org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:444) [elasticsearch-5.0.0.jar:5.0.0]\r\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_45]\r\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_45]\r\n\tat java.lang.Thread.run(Thread.java:745) [?:1.8.0_45]\r\nCaused by: org.elasticsearch.index.mapper.MapperParsingException: Mapping definition for [@version] has unsupported parameters: [fielddata : false]\r\n\tat org.elasticsearch.index.mapper.DocumentMapperParser.checkNoRemainingFields(DocumentMapperParser.java:146) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n\tat org.elasticsearch.index.mapper.DocumentMapperParser.checkNoRemainingFields(DocumentMapperParser.java:141) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n\tat org.elasticsearch.index.mapper.ObjectMapper$TypeParser.parseProperties(ObjectMapper.java:289) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n\tat org.elasticsearch.index.mapper.ObjectMapper$TypeParser.parseObjectOrDocumentTypeProperties(ObjectMapper.java:203) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n\tat org.elasticsearch.index.mapper.RootObjectMapper$TypeParser.parse(RootObjectMapper.java:102) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n\tat org.elasticsearch.index.mapper.DocumentMapperParser.parse(DocumentMapperParser.java:110) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n\tat org.elasticsearch.index.mapper.DocumentMapperParser.parse(DocumentMapperParser.java:91) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n\tat org.elasticsearch.index.mapper.MapperService.parse(MapperService.java:508) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n\tat org.elasticsearch.cluster.metadata.MetaDataMappingService$PutMappingExecutor.applyRequest(MetaDataMappingService.java:276) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n\tat org.elasticsearch.cluster.metadata.MetaDataMappingService$PutMappingExecutor.execute(MetaDataMappingService.java:241) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n\tat org.elasticsearch.cluster.service.ClusterService.runTasksForExecutor(ClusterService.java:555) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n\tat org.elasticsearch.cluster.service.ClusterService$UpdateTask.run(ClusterService.java:894) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n\tat org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:444) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n\tat org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:237) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n\tat org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:200) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n\t... 
3 more\r\n[2016-12-29T16:14:00,911][INFO ][o.e.c.r.a.AllocationService] [K-E6Kbx] Cluster health status changed from [YELLOW] to [RED] (reason: [shards failed [[doc_shrinked_index][0]] ...]).\r\n```\r\n\r\n*Note that it went from YELLOW to RED since the shard was not able to start\r\n", "comments": [ { "body": "I tried to use templates prior to shrink, and got the following exception:\r\n\r\n```\r\n{\r\n \"error\": {\r\n \"root_cause\": [\r\n {\r\n \"type\": \"illegal_argument_exception\",\r\n \"reason\": \"mappings are not allowed when shrinking indices, all mappings are copied from the source index\"\r\n }\r\n ],\r\n \"type\": \"illegal_argument_exception\",\r\n \"reason\": \"mappings are not allowed when shrinking indices, all mappings are copied from the source index\"\r\n },\r\n \"status\": 400\r\n}\r\n```\r\n\r\nIf the mappings are always copied from the source index, then it means that there is no workaround for this issue.", "created_at": "2016-12-29T18:49:40Z" }, { "body": "Would using the reindex API be a workaround @gmoskovicz ?", "created_at": "2016-12-30T10:01:53Z" }, { "body": "Reindexing *is* a workaround. But i meant that there is no workaround as previous steps to actually use the shrink API with some previous steps.", "created_at": "2016-12-30T11:02:31Z" }, { "body": "I think the issue here is that we don't use the `index_version_created` for the target index which we should. I will look into this in a bit.", "created_at": "2017-01-06T10:33:59Z" }, { "body": "I just build an ES version from a snapshot and confirmed that the bug is fixed. thanks for reporting this @gmoskovicz ", "created_at": "2017-01-06T16:04:11Z" }, { "body": "Thanks for fixing it @s1monw ", "created_at": "2017-01-06T16:55:01Z" }, { "body": "@s1monw still happening in v5.2.1:\r\n\r\n```\r\n[2017-02-21T11:06:41,686][WARN ][o.e.c.a.s.ShardStateAction] [e9D0OYU] [doc_shrinked_index][0] received shard failed for shard id [[doc_shrinked_index][0]], allocation id [9DidZyR5R1GbdtJLfrM2Tw], primary term [0], message [failed recovery], failure [RecoveryFailedException[[doc_shrinked_index][0]: Recovery failed on {e9D0OYU}{e9D0OYUURbClpiOZmPE1Hg}{Icm2utt3Q4SFK5YHut-a2Q}{127.0.0.1}{127.0.0.1:9300}]; nested: MapperParsingException[Mapping definition for [@version] has unsupported parameters: [fielddata : false]]; ]\r\norg.elasticsearch.indices.recovery.RecoveryFailedException: [doc_shrinked_index][0]: Recovery failed on {e9D0OYU}{e9D0OYUURbClpiOZmPE1Hg}{Icm2utt3Q4SFK5YHut-a2Q}{127.0.0.1}{127.0.0.1:9300}\r\n\tat org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$4(IndexShard.java:1537) [elasticsearch-5.2.1.jar:5.2.1]\r\n\tat org.elasticsearch.index.shard.IndexShard$$Lambda$1580/1463687544.run(Unknown Source) [elasticsearch-5.2.1.jar:5.2.1]\r\n\tat org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:527) [elasticsearch-5.2.1.jar:5.2.1]\r\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_45]\r\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_45]\r\n\tat java.lang.Thread.run(Thread.java:745) [?:1.8.0_45]\r\nCaused by: org.elasticsearch.index.mapper.MapperParsingException: Mapping definition for [@version] has unsupported parameters: [fielddata : false]\r\n\tat org.elasticsearch.index.mapper.DocumentMapperParser.checkNoRemainingFields(DocumentMapperParser.java:151) ~[elasticsearch-5.2.1.jar:5.2.1]\r\n\tat 
org.elasticsearch.index.mapper.DocumentMapperParser.checkNoRemainingFields(DocumentMapperParser.java:145) ~[elasticsearch-5.2.1.jar:5.2.1]\r\n\tat org.elasticsearch.index.mapper.ObjectMapper$TypeParser.parseProperties(ObjectMapper.java:289) ~[elasticsearch-5.2.1.jar:5.2.1]\r\n\tat org.elasticsearch.index.mapper.ObjectMapper$TypeParser.parseObjectOrDocumentTypeProperties(ObjectMapper.java:203) ~[elasticsearch-5.2.1.jar:5.2.1]\r\n\tat org.elasticsearch.index.mapper.RootObjectMapper$TypeParser.parse(RootObjectMapper.java:102) ~[elasticsearch-5.2.1.jar:5.2.1]\r\n\tat org.elasticsearch.index.mapper.DocumentMapperParser.parse(DocumentMapperParser.java:111) ~[elasticsearch-5.2.1.jar:5.2.1]\r\n\tat org.elasticsearch.index.mapper.DocumentMapperParser.parse(DocumentMapperParser.java:91) ~[elasticsearch-5.2.1.jar:5.2.1]\r\n\tat org.elasticsearch.index.mapper.MapperService.parse(MapperService.java:602) ~[elasticsearch-5.2.1.jar:5.2.1]\r\n\tat org.elasticsearch.cluster.metadata.MetaDataMappingService$PutMappingExecutor.applyRequest(MetaDataMappingService.java:264) ~[elasticsearch-5.2.1.jar:5.2.1]\r\n\tat org.elasticsearch.cluster.metadata.MetaDataMappingService$PutMappingExecutor.execute(MetaDataMappingService.java:230) ~[elasticsearch-5.2.1.jar:5.2.1]\r\n\tat org.elasticsearch.cluster.service.ClusterService.executeTasks(ClusterService.java:674) ~[elasticsearch-5.2.1.jar:5.2.1]\r\n\tat org.elasticsearch.cluster.service.ClusterService.calculateTaskOutputs(ClusterService.java:653) ~[elasticsearch-5.2.1.jar:5.2.1]\r\n\tat org.elasticsearch.cluster.service.ClusterService.runTasks(ClusterService.java:612) ~[elasticsearch-5.2.1.jar:5.2.1]\r\n\tat org.elasticsearch.cluster.service.ClusterService$UpdateTask.run(ClusterService.java:1112) ~[elasticsearch-5.2.1.jar:5.2.1]\r\n\tat org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:527) ~[elasticsearch-5.2.1.jar:5.2.1]\r\n\tat org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:238) ~[elasticsearch-5.2.1.jar:5.2.1]\r\n\tat org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:201) ~[elasticsearch-5.2.1.jar:5.2.1]\r\n\t... 3 more\r\n```\r\n\r\nSame repro steps.", "created_at": "2017-02-21T13:06:32Z" }, { "body": "Looks like we need to backport this fix into 5.2, it was backported to 5.1 and 5.3 already.", "created_at": "2017-02-21T13:17:35Z" }, { "body": "@gmoskovicz thanks for reopening see https://github.com/elastic/elasticsearch/pull/22469#issuecomment-281344080", "created_at": "2017-02-21T13:28:49Z" }, { "body": "Roger that! Thanks for the quick fix @s1monw !", "created_at": "2017-02-21T13:31:26Z" } ], "number": 22373, "title": "Shrinking index upgraded from 2.x fails" }
{ "body": "Today when an index is shrunk the version information is not carried over\r\nfrom the source to the target index. This can cause major issues like mapping\r\nincompatibilities for instance if an index from a previous major version is shrunk.\r\n\r\nThis commit ensures that all version information from the soruce index is preserved\r\nwhen a shrunk index is created.\r\n\r\nCloses #22373", "number": 22469, "review_comments": [], "title": "Ensure shrunk indices carry over version information from its source" }
{ "commits": [ { "message": "Ensure shrunk indices carry over version information from its source\n\nToday when an index is shrunk the version information is not carried over\nfrom the source to the target index. This can cause major issues like mapping\nincompatibilities for instance if an index from a previous major version is shrunk.\n\nThis commit ensures that all version information from the soruce index is preserved\nwhen a shrunk index is created.\n\nCloses #22373" } ], "files": [ { "diff": "@@ -545,6 +545,7 @@ static List<String> validateShrinkIndex(ClusterState state, String sourceIndex,\n throw new IllegalArgumentException(\"mappings are not allowed when shrinking indices\" +\n \", all mappings are copied from the source index\");\n }\n+\n if (IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) {\n // this method applies all necessary checks ie. if the target shards are less than the source shards\n // of if the source shards are divisible by the number of target shards\n@@ -588,9 +589,14 @@ static void prepareShrinkIndexSettings(ClusterState currentState, Set<String> ma\n .put(\"index.allocation.max_retries\", 1)\n // now copy all similarity / analysis settings - this overrides all settings from the user unless they\n // wanna add extra settings\n+ .put(IndexMetaData.SETTING_VERSION_CREATED, sourceMetaData.getCreationVersion())\n+ .put(IndexMetaData.SETTING_VERSION_UPGRADED, sourceMetaData.getUpgradedVersion())\n .put(sourceMetaData.getSettings().filter(analysisSimilarityPredicate))\n .put(IndexMetaData.INDEX_SHRINK_SOURCE_NAME.getKey(), shrinkFromIndex.getName())\n .put(IndexMetaData.INDEX_SHRINK_SOURCE_UUID.getKey(), shrinkFromIndex.getUUID());\n+ if (sourceMetaData.getMinimumCompatibleVersion() != null) {\n+ indexSettingsBuilder.put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE, sourceMetaData.getMinimumCompatibleVersion());\n+ }\n }\n \n }", "filename": "core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java", "status": "modified" }, { "diff": "@@ -21,26 +21,19 @@\n \n import org.elasticsearch.action.ActionListener;\n import org.elasticsearch.action.UnavailableShardsException;\n-import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;\n import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;\n import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;\n import org.elasticsearch.action.search.SearchResponse;\n import org.elasticsearch.action.support.ActiveShardCount;\n import org.elasticsearch.action.support.IndicesOptions;\n-import org.elasticsearch.cluster.ClusterInfoService;\n import org.elasticsearch.cluster.ClusterState;\n-import org.elasticsearch.cluster.InternalClusterInfoService;\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.cluster.metadata.MetaData;\n-import org.elasticsearch.cluster.node.DiscoveryNode;\n-import org.elasticsearch.cluster.routing.RoutingTable;\n-import org.elasticsearch.cluster.routing.UnassignedInfo;\n import org.elasticsearch.common.collect.ImmutableOpenMap;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.unit.TimeValue;\n import org.elasticsearch.index.IndexNotFoundException;\n import org.elasticsearch.index.query.RangeQueryBuilder;\n-import org.elasticsearch.index.query.TermsQueryBuilder;\n import org.elasticsearch.test.ESIntegTestCase;\n import org.elasticsearch.test.ESIntegTestCase.ClusterScope;\n import org.elasticsearch.test.ESIntegTestCase.Scope;\n@@ -53,7 
+46,6 @@\n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;\n-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;\n import static org.hamcrest.Matchers.allOf;\n import static org.hamcrest.Matchers.containsString;\n import static org.hamcrest.Matchers.equalTo;\n@@ -63,6 +55,7 @@\n \n @ClusterScope(scope = Scope.TEST)\n public class CreateIndexIT extends ESIntegTestCase {\n+\n public void testCreationDateGivenFails() {\n try {\n prepareCreate(\"test\").setSettings(Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 4L)).get();\n@@ -288,192 +281,6 @@ public void testRestartIndexCreationAfterFullClusterRestart() throws Exception {\n ensureGreen(\"test\");\n }\n \n- public void testCreateShrinkIndexToN() {\n- int[][] possibleShardSplits = new int[][] {{8,4,2}, {9, 3, 1}, {4, 2, 1}, {15,5,1}};\n- int[] shardSplits = randomFrom(possibleShardSplits);\n- assertEquals(shardSplits[0], (shardSplits[0] / shardSplits[1]) * shardSplits[1]);\n- assertEquals(shardSplits[1], (shardSplits[1] / shardSplits[2]) * shardSplits[2]);\n- internalCluster().ensureAtLeastNumDataNodes(2);\n- prepareCreate(\"source\").setSettings(Settings.builder().put(indexSettings()).put(\"number_of_shards\", shardSplits[0])).get();\n- for (int i = 0; i < 20; i++) {\n- client().prepareIndex(\"source\", \"t1\", Integer.toString(i)).setSource(\"{\\\"foo\\\" : \\\"bar\\\", \\\"i\\\" : \" + i + \"}\").get();\n- }\n- ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes()\n- .getDataNodes();\n- assertTrue(\"at least 2 nodes but was: \" + dataNodes.size(), dataNodes.size() >= 2);\n- DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);\n- String mergeNode = discoveryNodes[0].getName();\n- // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node\n- // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due\n- // to the require._name below.\n- ensureGreen();\n- // relocate all shards to one node such that we can merge it.\n- client().admin().indices().prepareUpdateSettings(\"source\")\n- .setSettings(Settings.builder()\n- .put(\"index.routing.allocation.require._name\", mergeNode)\n- .put(\"index.blocks.write\", true)).get();\n- ensureGreen();\n- // now merge source into a 4 shard index\n- assertAcked(client().admin().indices().prepareShrinkIndex(\"source\", \"first_shrink\")\n- .setSettings(Settings.builder()\n- .put(\"index.number_of_replicas\", 0)\n- .put(\"index.number_of_shards\", shardSplits[1]).build()).get());\n- ensureGreen();\n- assertHitCount(client().prepareSearch(\"first_shrink\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n-\n- for (int i = 0; i < 20; i++) { // now update\n- client().prepareIndex(\"first_shrink\", \"t1\", Integer.toString(i)).setSource(\"{\\\"foo\\\" : \\\"bar\\\", \\\"i\\\" : \" + i + \"}\").get();\n- }\n- flushAndRefresh();\n- assertHitCount(client().prepareSearch(\"first_shrink\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n- assertHitCount(client().prepareSearch(\"source\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n-\n- // relocate all shards to one node such that we 
can merge it.\n- client().admin().indices().prepareUpdateSettings(\"first_shrink\")\n- .setSettings(Settings.builder()\n- .put(\"index.routing.allocation.require._name\", mergeNode)\n- .put(\"index.blocks.write\", true)).get();\n- ensureGreen();\n- // now merge source into a 2 shard index\n- assertAcked(client().admin().indices().prepareShrinkIndex(\"first_shrink\", \"second_shrink\")\n- .setSettings(Settings.builder()\n- .put(\"index.number_of_replicas\", 0)\n- .put(\"index.number_of_shards\", shardSplits[2]).build()).get());\n- ensureGreen();\n- assertHitCount(client().prepareSearch(\"second_shrink\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n- // let it be allocated anywhere and bump replicas\n- client().admin().indices().prepareUpdateSettings(\"second_shrink\")\n- .setSettings(Settings.builder()\n- .putNull(\"index.routing.allocation.include._id\")\n- .put(\"index.number_of_replicas\", 1)).get();\n- ensureGreen();\n- assertHitCount(client().prepareSearch(\"second_shrink\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n-\n- for (int i = 0; i < 20; i++) { // now update\n- client().prepareIndex(\"second_shrink\", \"t1\", Integer.toString(i)).setSource(\"{\\\"foo\\\" : \\\"bar\\\", \\\"i\\\" : \" + i + \"}\").get();\n- }\n- flushAndRefresh();\n- assertHitCount(client().prepareSearch(\"second_shrink\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n- assertHitCount(client().prepareSearch(\"first_shrink\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n- assertHitCount(client().prepareSearch(\"source\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n- }\n-\n- public void testCreateShrinkIndex() {\n- internalCluster().ensureAtLeastNumDataNodes(2);\n- prepareCreate(\"source\").setSettings(Settings.builder().put(indexSettings()).put(\"number_of_shards\", randomIntBetween(2, 7))).get();\n- for (int i = 0; i < 20; i++) {\n- client().prepareIndex(\"source\", randomFrom(\"t1\", \"t2\", \"t3\")).setSource(\"{\\\"foo\\\" : \\\"bar\\\", \\\"i\\\" : \" + i + \"}\").get();\n- }\n- ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes()\n- .getDataNodes();\n- assertTrue(\"at least 2 nodes but was: \" + dataNodes.size(), dataNodes.size() >= 2);\n- DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);\n- String mergeNode = discoveryNodes[0].getName();\n- // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node\n- // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due\n- // to the require._name below.\n- ensureGreen();\n- // relocate all shards to one node such that we can merge it.\n- client().admin().indices().prepareUpdateSettings(\"source\")\n- .setSettings(Settings.builder()\n- .put(\"index.routing.allocation.require._name\", mergeNode)\n- .put(\"index.blocks.write\", true)).get();\n- ensureGreen();\n- // now merge source into a single shard index\n-\n- final boolean createWithReplicas = randomBoolean();\n- assertAcked(client().admin().indices().prepareShrinkIndex(\"source\", \"target\")\n- .setSettings(Settings.builder().put(\"index.number_of_replicas\", createWithReplicas ? 
1 : 0).build()).get());\n- ensureGreen();\n- assertHitCount(client().prepareSearch(\"target\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n-\n- if (createWithReplicas == false) {\n- // bump replicas\n- client().admin().indices().prepareUpdateSettings(\"target\")\n- .setSettings(Settings.builder()\n- .put(\"index.number_of_replicas\", 1)).get();\n- ensureGreen();\n- assertHitCount(client().prepareSearch(\"target\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n- }\n-\n- for (int i = 20; i < 40; i++) {\n- client().prepareIndex(\"target\", randomFrom(\"t1\", \"t2\", \"t3\")).setSource(\"{\\\"foo\\\" : \\\"bar\\\", \\\"i\\\" : \" + i + \"}\").get();\n- }\n- flushAndRefresh();\n- assertHitCount(client().prepareSearch(\"target\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 40);\n- assertHitCount(client().prepareSearch(\"source\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n-\n- }\n- /**\n- * Tests that we can manually recover from a failed allocation due to shards being moved away etc.\n- */\n- public void testCreateShrinkIndexFails() throws Exception {\n- internalCluster().ensureAtLeastNumDataNodes(2);\n- prepareCreate(\"source\").setSettings(Settings.builder().put(indexSettings())\n- .put(\"number_of_shards\", randomIntBetween(2, 7))\n- .put(\"number_of_replicas\", 0)).get();\n- for (int i = 0; i < 20; i++) {\n- client().prepareIndex(\"source\", randomFrom(\"t1\", \"t2\", \"t3\")).setSource(\"{\\\"foo\\\" : \\\"bar\\\", \\\"i\\\" : \" + i + \"}\").get();\n- }\n- ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes()\n- .getDataNodes();\n- assertTrue(\"at least 2 nodes but was: \" + dataNodes.size(), dataNodes.size() >= 2);\n- DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);\n- String spareNode = discoveryNodes[0].getName();\n- String mergeNode = discoveryNodes[1].getName();\n- // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node\n- // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due\n- // to the require._name below.\n- ensureGreen();\n- // relocate all shards to one node such that we can merge it.\n- client().admin().indices().prepareUpdateSettings(\"source\")\n- .setSettings(Settings.builder().put(\"index.routing.allocation.require._name\", mergeNode)\n- .put(\"index.blocks.write\", true)).get();\n- ensureGreen();\n-\n- // now merge source into a single shard index\n- client().admin().indices().prepareShrinkIndex(\"source\", \"target\")\n- .setSettings(Settings.builder()\n- .put(\"index.routing.allocation.exclude._name\", mergeNode) // we manually exclude the merge node to forcefully fuck it up\n- .put(\"index.number_of_replicas\", 0)\n- .put(\"index.allocation.max_retries\", 1).build()).get();\n-\n- // now we move all shards away from the merge node\n- client().admin().indices().prepareUpdateSettings(\"source\")\n- .setSettings(Settings.builder().put(\"index.routing.allocation.require._name\", spareNode)\n- .put(\"index.blocks.write\", true)).get();\n- ensureGreen(\"source\");\n-\n- client().admin().indices().prepareUpdateSettings(\"target\") // erase the forcefully fuckup!\n- .setSettings(Settings.builder().putNull(\"index.routing.allocation.exclude._name\")).get();\n- // wait until it fails\n- assertBusy(() -> {\n- ClusterStateResponse 
clusterStateResponse = client().admin().cluster().prepareState().get();\n- RoutingTable routingTables = clusterStateResponse.getState().routingTable();\n- assertTrue(routingTables.index(\"target\").shard(0).getShards().get(0).unassigned());\n- assertEquals(UnassignedInfo.Reason.ALLOCATION_FAILED,\n- routingTables.index(\"target\").shard(0).getShards().get(0).unassignedInfo().getReason());\n- assertEquals(1,\n- routingTables.index(\"target\").shard(0).getShards().get(0).unassignedInfo().getNumFailedAllocations());\n- });\n- client().admin().indices().prepareUpdateSettings(\"source\") // now relocate them all to the right node\n- .setSettings(Settings.builder()\n- .put(\"index.routing.allocation.require._name\", mergeNode)).get();\n- ensureGreen(\"source\");\n-\n- final InternalClusterInfoService infoService = (InternalClusterInfoService) internalCluster().getInstance(ClusterInfoService.class,\n- internalCluster().getMasterName());\n- infoService.refresh();\n- // kick off a retry and wait until it's done!\n- ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get();\n- long expectedShardSize = clusterRerouteResponse.getState().routingTable().index(\"target\")\n- .shard(0).getShards().get(0).getExpectedShardSize();\n- // we support the expected shard size in the allocator to sum up over the source index shards\n- assertTrue(\"expected shard size must be set but wasn't: \" + expectedShardSize, expectedShardSize > 0);\n- ensureGreen();\n- assertHitCount(client().prepareSearch(\"target\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n- }\n-\n /**\n * This test ensures that index creation adheres to the {@link IndexMetaData#SETTING_WAIT_FOR_ACTIVE_SHARDS}.\n */", "filename": "core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java", "status": "modified" }, { "diff": "@@ -0,0 +1,246 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.action.admin.indices.create;\n+\n+import org.elasticsearch.Version;\n+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;\n+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;\n+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;\n+import org.elasticsearch.action.support.ActiveShardCount;\n+import org.elasticsearch.cluster.ClusterInfoService;\n+import org.elasticsearch.cluster.InternalClusterInfoService;\n+import org.elasticsearch.cluster.node.DiscoveryNode;\n+import org.elasticsearch.cluster.routing.RoutingTable;\n+import org.elasticsearch.cluster.routing.UnassignedInfo;\n+import org.elasticsearch.common.Priority;\n+import org.elasticsearch.common.collect.ImmutableOpenMap;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.index.query.TermsQueryBuilder;\n+import org.elasticsearch.plugins.Plugin;\n+import org.elasticsearch.test.ESIntegTestCase;\n+import org.elasticsearch.test.InternalSettingsPlugin;\n+import org.elasticsearch.test.VersionUtils;\n+\n+import java.util.Arrays;\n+import java.util.Collection;\n+\n+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;\n+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;\n+\n+public class ShrinkIndexIT extends ESIntegTestCase {\n+\n+ @Override\n+ protected Collection<Class<? extends Plugin>> nodePlugins() {\n+ return Arrays.asList(InternalSettingsPlugin.class);\n+ }\n+\n+ public void testCreateShrinkIndexToN() {\n+ int[][] possibleShardSplits = new int[][] {{8,4,2}, {9, 3, 1}, {4, 2, 1}, {15,5,1}};\n+ int[] shardSplits = randomFrom(possibleShardSplits);\n+ assertEquals(shardSplits[0], (shardSplits[0] / shardSplits[1]) * shardSplits[1]);\n+ assertEquals(shardSplits[1], (shardSplits[1] / shardSplits[2]) * shardSplits[2]);\n+ internalCluster().ensureAtLeastNumDataNodes(2);\n+ prepareCreate(\"source\").setSettings(Settings.builder().put(indexSettings()).put(\"number_of_shards\", shardSplits[0])).get();\n+ for (int i = 0; i < 20; i++) {\n+ client().prepareIndex(\"source\", \"t1\", Integer.toString(i)).setSource(\"{\\\"foo\\\" : \\\"bar\\\", \\\"i\\\" : \" + i + \"}\").get();\n+ }\n+ ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes()\n+ .getDataNodes();\n+ assertTrue(\"at least 2 nodes but was: \" + dataNodes.size(), dataNodes.size() >= 2);\n+ DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);\n+ String mergeNode = discoveryNodes[0].getName();\n+ // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node\n+ // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due\n+ // to the require._name below.\n+ ensureGreen();\n+ // relocate all shards to one node such that we can merge it.\n+ client().admin().indices().prepareUpdateSettings(\"source\")\n+ .setSettings(Settings.builder()\n+ .put(\"index.routing.allocation.require._name\", mergeNode)\n+ .put(\"index.blocks.write\", true)).get();\n+ ensureGreen();\n+ // now merge source into a 4 shard index\n+ assertAcked(client().admin().indices().prepareShrinkIndex(\"source\", \"first_shrink\")\n+ .setSettings(Settings.builder()\n+ .put(\"index.number_of_replicas\", 0)\n+ 
.put(\"index.number_of_shards\", shardSplits[1]).build()).get());\n+ ensureGreen();\n+ assertHitCount(client().prepareSearch(\"first_shrink\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n+\n+ for (int i = 0; i < 20; i++) { // now update\n+ client().prepareIndex(\"first_shrink\", \"t1\", Integer.toString(i)).setSource(\"{\\\"foo\\\" : \\\"bar\\\", \\\"i\\\" : \" + i + \"}\").get();\n+ }\n+ flushAndRefresh();\n+ assertHitCount(client().prepareSearch(\"first_shrink\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n+ assertHitCount(client().prepareSearch(\"source\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n+\n+ // relocate all shards to one node such that we can merge it.\n+ client().admin().indices().prepareUpdateSettings(\"first_shrink\")\n+ .setSettings(Settings.builder()\n+ .put(\"index.routing.allocation.require._name\", mergeNode)\n+ .put(\"index.blocks.write\", true)).get();\n+ ensureGreen();\n+ // now merge source into a 2 shard index\n+ assertAcked(client().admin().indices().prepareShrinkIndex(\"first_shrink\", \"second_shrink\")\n+ .setSettings(Settings.builder()\n+ .put(\"index.number_of_replicas\", 0)\n+ .put(\"index.number_of_shards\", shardSplits[2]).build()).get());\n+ ensureGreen();\n+ assertHitCount(client().prepareSearch(\"second_shrink\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n+ // let it be allocated anywhere and bump replicas\n+ client().admin().indices().prepareUpdateSettings(\"second_shrink\")\n+ .setSettings(Settings.builder()\n+ .putNull(\"index.routing.allocation.include._id\")\n+ .put(\"index.number_of_replicas\", 1)).get();\n+ ensureGreen();\n+ assertHitCount(client().prepareSearch(\"second_shrink\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n+\n+ for (int i = 0; i < 20; i++) { // now update\n+ client().prepareIndex(\"second_shrink\", \"t1\", Integer.toString(i)).setSource(\"{\\\"foo\\\" : \\\"bar\\\", \\\"i\\\" : \" + i + \"}\").get();\n+ }\n+ flushAndRefresh();\n+ assertHitCount(client().prepareSearch(\"second_shrink\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n+ assertHitCount(client().prepareSearch(\"first_shrink\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n+ assertHitCount(client().prepareSearch(\"source\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n+ }\n+\n+ public void testCreateShrinkIndex() {\n+ internalCluster().ensureAtLeastNumDataNodes(2);\n+ Version version = VersionUtils.randomVersion(random());\n+ prepareCreate(\"source\").setSettings(Settings.builder().put(indexSettings())\n+ .put(\"number_of_shards\", randomIntBetween(2, 7))\n+ .put(\"index.version.created\", version)\n+ ).get();\n+ for (int i = 0; i < 20; i++) {\n+ client().prepareIndex(\"source\", randomFrom(\"t1\", \"t2\", \"t3\")).setSource(\"{\\\"foo\\\" : \\\"bar\\\", \\\"i\\\" : \" + i + \"}\").get();\n+ }\n+ ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes()\n+ .getDataNodes();\n+ assertTrue(\"at least 2 nodes but was: \" + dataNodes.size(), dataNodes.size() >= 2);\n+ DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);\n+ String mergeNode = discoveryNodes[0].getName();\n+ // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node\n+ // if we change the setting too quickly we 
will end up with one replica unassigned which can't be assigned anymore due\n+ // to the require._name below.\n+ ensureGreen();\n+ // relocate all shards to one node such that we can merge it.\n+ client().admin().indices().prepareUpdateSettings(\"source\")\n+ .setSettings(Settings.builder()\n+ .put(\"index.routing.allocation.require._name\", mergeNode)\n+ .put(\"index.blocks.write\", true)).get();\n+ ensureGreen();\n+ // now merge source into a single shard index\n+\n+ final boolean createWithReplicas = randomBoolean();\n+ assertAcked(client().admin().indices().prepareShrinkIndex(\"source\", \"target\")\n+ .setSettings(Settings.builder().put(\"index.number_of_replicas\", createWithReplicas ? 1 : 0).build()).get());\n+ ensureGreen();\n+ assertHitCount(client().prepareSearch(\"target\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n+\n+ if (createWithReplicas == false) {\n+ // bump replicas\n+ client().admin().indices().prepareUpdateSettings(\"target\")\n+ .setSettings(Settings.builder()\n+ .put(\"index.number_of_replicas\", 1)).get();\n+ ensureGreen();\n+ assertHitCount(client().prepareSearch(\"target\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n+ }\n+\n+ for (int i = 20; i < 40; i++) {\n+ client().prepareIndex(\"target\", randomFrom(\"t1\", \"t2\", \"t3\")).setSource(\"{\\\"foo\\\" : \\\"bar\\\", \\\"i\\\" : \" + i + \"}\").get();\n+ }\n+ flushAndRefresh();\n+ assertHitCount(client().prepareSearch(\"target\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 40);\n+ assertHitCount(client().prepareSearch(\"source\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n+ GetSettingsResponse target = client().admin().indices().prepareGetSettings(\"target\").get();\n+ assertEquals(version, target.getIndexToSettings().get(\"target\").getAsVersion(\"index.version.created\", null));\n+ }\n+ /**\n+ * Tests that we can manually recover from a failed allocation due to shards being moved away etc.\n+ */\n+ public void testCreateShrinkIndexFails() throws Exception {\n+ internalCluster().ensureAtLeastNumDataNodes(2);\n+ prepareCreate(\"source\").setSettings(Settings.builder().put(indexSettings())\n+ .put(\"number_of_shards\", randomIntBetween(2, 7))\n+ .put(\"number_of_replicas\", 0)).get();\n+ for (int i = 0; i < 20; i++) {\n+ client().prepareIndex(\"source\", randomFrom(\"t1\", \"t2\", \"t3\")).setSource(\"{\\\"foo\\\" : \\\"bar\\\", \\\"i\\\" : \" + i + \"}\").get();\n+ }\n+ ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes()\n+ .getDataNodes();\n+ assertTrue(\"at least 2 nodes but was: \" + dataNodes.size(), dataNodes.size() >= 2);\n+ DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);\n+ String spareNode = discoveryNodes[0].getName();\n+ String mergeNode = discoveryNodes[1].getName();\n+ // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node\n+ // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due\n+ // to the require._name below.\n+ ensureGreen();\n+ // relocate all shards to one node such that we can merge it.\n+ client().admin().indices().prepareUpdateSettings(\"source\")\n+ .setSettings(Settings.builder().put(\"index.routing.allocation.require._name\", mergeNode)\n+ .put(\"index.blocks.write\", true)).get();\n+ ensureGreen();\n+\n+ // now merge source into a single 
shard index\n+ client().admin().indices().prepareShrinkIndex(\"source\", \"target\")\n+ .setWaitForActiveShards(ActiveShardCount.NONE)\n+ .setSettings(Settings.builder()\n+ .put(\"index.routing.allocation.exclude._name\", mergeNode) // we manually exclude the merge node to forcefully fuck it up\n+ .put(\"index.number_of_replicas\", 0)\n+ .put(\"index.allocation.max_retries\", 1).build()).get();\n+ client().admin().cluster().prepareHealth(\"target\").setWaitForEvents(Priority.LANGUID).get();\n+\n+ // now we move all shards away from the merge node\n+ client().admin().indices().prepareUpdateSettings(\"source\")\n+ .setSettings(Settings.builder().put(\"index.routing.allocation.require._name\", spareNode)\n+ .put(\"index.blocks.write\", true)).get();\n+ ensureGreen(\"source\");\n+\n+ client().admin().indices().prepareUpdateSettings(\"target\") // erase the forcefully fuckup!\n+ .setSettings(Settings.builder().putNull(\"index.routing.allocation.exclude._name\")).get();\n+ // wait until it fails\n+ assertBusy(() -> {\n+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();\n+ RoutingTable routingTables = clusterStateResponse.getState().routingTable();\n+ assertTrue(routingTables.index(\"target\").shard(0).getShards().get(0).unassigned());\n+ assertEquals(UnassignedInfo.Reason.ALLOCATION_FAILED,\n+ routingTables.index(\"target\").shard(0).getShards().get(0).unassignedInfo().getReason());\n+ assertEquals(1,\n+ routingTables.index(\"target\").shard(0).getShards().get(0).unassignedInfo().getNumFailedAllocations());\n+ });\n+ client().admin().indices().prepareUpdateSettings(\"source\") // now relocate them all to the right node\n+ .setSettings(Settings.builder()\n+ .put(\"index.routing.allocation.require._name\", mergeNode)).get();\n+ ensureGreen(\"source\");\n+\n+ final InternalClusterInfoService infoService = (InternalClusterInfoService) internalCluster().getInstance(ClusterInfoService.class,\n+ internalCluster().getMasterName());\n+ infoService.refresh();\n+ // kick off a retry and wait until it's done!\n+ ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get();\n+ long expectedShardSize = clusterRerouteResponse.getState().routingTable().index(\"target\")\n+ .shard(0).getShards().get(0).getExpectedShardSize();\n+ // we support the expected shard size in the allocator to sum up over the source index shards\n+ assertTrue(\"expected shard size must be set but wasn't: \" + expectedShardSize, expectedShardSize > 0);\n+ ensureGreen();\n+ assertHitCount(client().prepareSearch(\"target\").setSize(100).setQuery(new TermsQueryBuilder(\"foo\", \"bar\")).get(), 20);\n+ }\n+}", "filename": "core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java", "status": "added" }, { "diff": "@@ -38,13 +38,16 @@\n import org.elasticsearch.ResourceAlreadyExistsException;\n import org.elasticsearch.indices.InvalidIndexNameException;\n import org.elasticsearch.test.ESTestCase;\n+import org.elasticsearch.test.VersionUtils;\n import org.elasticsearch.test.gateway.TestGatewayAllocator;\n \n import java.util.Arrays;\n import java.util.Collections;\n import java.util.HashSet;\n+import java.util.List;\n \n import static java.util.Collections.emptyMap;\n+import static java.util.Collections.min;\n import static org.hamcrest.Matchers.endsWith;\n \n public class MetaDataCreateIndexServiceTests extends ESTestCase {\n@@ -150,11 +153,20 @@ public void testValidateShrinkIndex() {\n \n public void 
testShrinkIndexSettings() {\n String indexName = randomAsciiOfLength(10);\n+ List<Version> versions = Arrays.asList(VersionUtils.randomVersion(random()), VersionUtils.randomVersion(random()),\n+ VersionUtils.randomVersion(random()));\n+ versions.sort((l, r) -> Long.compare(l.id, r.id));\n+ Version version = versions.get(0);\n+ Version minCompat = versions.get(1);\n+ Version upgraded = versions.get(2);\n // create one that won't fail\n ClusterState clusterState = ClusterState.builder(createClusterState(indexName, randomIntBetween(2, 10), 0,\n Settings.builder()\n .put(\"index.blocks.write\", true)\n .put(\"index.similarity.default.type\", \"BM25\")\n+ .put(\"index.version.created\", version)\n+ .put(\"index.version.upgraded\", upgraded)\n+ .put(\"index.version.minimum_compatible\", minCompat.luceneVersion)\n .put(\"index.analysis.analyzer.my_analyzer.tokenizer\", \"keyword\")\n .build())).nodes(DiscoveryNodes.builder().add(newNode(\"node1\")))\n .build();\n@@ -177,6 +189,10 @@ public void testShrinkIndexSettings() {\n \"keyword\", builder.build().get(\"index.analysis.analyzer.my_analyzer.tokenizer\"));\n assertEquals(\"node1\", builder.build().get(\"index.routing.allocation.initial_recovery._id\"));\n assertEquals(\"1\", builder.build().get(\"index.allocation.max_retries\"));\n+ assertEquals(version, builder.build().getAsVersion(\"index.version.created\", null));\n+ assertEquals(upgraded, builder.build().getAsVersion(\"index.version.upgraded\", null));\n+ assertEquals(minCompat.luceneVersion.toString(), builder.build().get(\"index.version.minimum_compatible\", null));\n+\n }\n \n private DiscoveryNode newNode(String nodeId) {", "filename": "core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java", "status": "modified" } ] }
{ "body": "Today we add a prefix when logging within Elasticsearch. This prefix\ncontains the node name, and index and shard-level components if\nappropriate.\n\nDue to some implementation details with Log4j 2 , this does not work for\nintegration tests; instead what we see is the node name for the last\nnode to startup. The implementation detail here is that Log4j 2 there is\nonly one logger for a name, message factory pair, and the key derived\nfrom the message factory is the class name of the message factory. So,\nwhen the last node starts up and starts setting prefixes on its message\nfactories, it will impact the loggers for the other nodes.\n\nAdditionally, the prefixes are lost when logging an exception. This is\ndue to another implementation detail in Log4j 2. Namely, since we log\nexceptions using a parameterized message, Log4j 2 decides that that\nmeans that we do not want to use the message factory that we have\nprovided (the prefix message factory) and so logs the exception without\nthe prefix.\n\nThis commit fixes both of these issues.\n", "comments": [ { "body": "I'm not an expert here, but the approach looks like the best we had so far. Left some minor comments.\n", "created_at": "2016-09-12T20:05:18Z" }, { "body": "LGTM. Thanks @jasontedor . Another fun adventure.\n", "created_at": "2016-09-13T18:44:53Z" }, { "body": "Thank you @bleskes and @mikemccand for careful reviews.\n", "created_at": "2016-09-13T18:46:08Z" } ], "number": 20429, "title": "Fix prefix logging" }
{ "body": "We have a custom logger implementation known as a prefix logger that is used to write every message by the logger with a given prefix. This is useful for node-level, index-level, and shard-level messages where we want to log the node name, index name, and shard ID, respectively, if possible. The mechanism that we employ is that of a marker. Log4j has a built-in facility for managing these markers, but its effectively a memory leak because these markers are held in a map and can never be released. This is problematic for us since indices and shards do not necessarily have infinite life spans and so on a node where there are many indices being creted and destroyed, this infinite lifespan can be a problem indeed. To solve this, we use our own cache of markers. This is necessary to prevent too many instances of the marker for the same prefix from being created (just think of all the shard-level components that exist in the system), and to workaround the effective leak in Log4j. These markers are stored as weak references in a weak hash map. It is these weak references that are unneeded. When a key is removed from a weak hash map, the corresponding entry is placed on a reference queue that is eventually cleared. This commit simplifies prefix logger by removing this unnecessary weak reference wrapper.\r\n\r\nRelates #20429", "number": 22460, "review_comments": [ { "body": "Maybe move the limit out to a local, with a comment on the reason for the size (I presume to ensure System.gc() actually collects something below).", "created_at": "2017-01-06T19:26:48Z" } ], "title": "Remove unneeded weak reference from prefix logger" }
{ "commits": [ { "message": "Remove unneeded weak reference from prefix logger\n\nWe have a custom logger implementation known as a prefix logger that is\nused to write every message by the logger with a given prefix. This is\nuseful for node-level, index-level, and shard-level messages where we\nwant to log the node name, index name, and shard ID, respectively, if\npossible. The mechanism that we employ is that of a marker. Log4j has a\nbuilt-in facility for managing these markers, but its effectively a\nmemory leak because these markers are held in a map and can never be\nreleased. This is problematic for us since indices and shards do not\nnecessarily have infinite life spans and so on a node where there are\nmany indices being creted and destroyed, this infinite lifespan can be a\nproblem indeed. To solve this, we use our own cache of markers. This is\nnecessary to prevent too many instances of the marker for the same\nprefix from being created (just think of all the shard-level components\nthat exist in the system), and to workaround the effective leak in\nLog4j. These markers are stored as weak references in a weak hash\nmap. It is these weak references that are unneeded. When a key is\nremoved from a weak hash map, the corresponding entry is placed on a\nreference queue that is eventually cleared. This commit simplifies\nprefix logger by removing this unnecessary weak reference wrapper." }, { "message": "Merge branch 'master' into simplify-prefix-logger\n\n* master: (1889 commits)\n Test: remove faling test that relies on merge order\n Log checkout so SHA is known\n Add link to community Rust Client (#22897)\n \"shard started\" should show index and shard ID (#25157)\n await fix testWithRandomException\n Change BWC versions on create index response\n Return the index name on a create index response\n Remove incorrect bwc branch logic from master\n Correctly format arrays in output\n [Test] Extending parsing checks for SearchResponse (#25148)\n Scripting: Change keys for inline/stored scripts to source/id (#25127)\n [Test] Add test for custom requests in High Level Rest Client (#25106)\n nested: In case of a single type the _id field should be added to the nested document instead of _uid field.\n `type` and `id` are lost upon serialization of `Translog.Delete`. (#24586)\n fix highlighting docs\n Fix NPE in token_count datatype with null value (#25046)\n Remove the postings highlighter and make unified the default highlighter choice (#25028)\n [Test] Adding test for parsing SearchShardFailure leniently (#25144)\n Fix typo in shards.asciidoc (#25143)\n List Hibernate Search (#25145)\n ..." }, { "message": "Feedback" } ], "files": [ { "diff": "@@ -26,34 +26,69 @@\n import org.apache.logging.log4j.spi.ExtendedLogger;\n import org.apache.logging.log4j.spi.ExtendedLoggerWrapper;\n \n-import java.lang.ref.WeakReference;\n import java.util.WeakHashMap;\n \n+/**\n+ * A logger that prefixes all messages with a fixed prefix specified during construction. 
The prefix mechanism uses the marker construct, so\n+ * for the prefixes to appear, the logging layout pattern must include the marker in its pattern.\n+ */\n class PrefixLogger extends ExtendedLoggerWrapper {\n \n- // we can not use the built-in Marker tracking (MarkerManager) because the MarkerManager holds\n- // a permanent reference to the marker; however, we have transient markers from index-level and\n- // shard-level components so this would effectively be a memory leak\n- private static final WeakHashMap<String, WeakReference<Marker>> markers = new WeakHashMap<>();\n+ /*\n+ * We can not use the built-in Marker tracking (MarkerManager) because the MarkerManager holds a permanent reference to the marker;\n+ * however, we have transient markers from index-level and shard-level components so this would effectively be a memory leak. Since we\n+ * can not tie into the lifecycle of these components, we have to use a mechanism that enables garbage collection of such markers when\n+ * they are no longer in use.\n+ */\n+ private static final WeakHashMap<String, Marker> markers = new WeakHashMap<>();\n+\n+ /**\n+ * Return the size of the cached markers. This size can vary as markers are cached but collected during GC activity when a given prefix\n+ * is no longer in use.\n+ *\n+ * @return the size of the cached markers\n+ */\n+ static int markersSize() {\n+ return markers.size();\n+ }\n \n+ /**\n+ * The marker for this prefix logger.\n+ */\n private final Marker marker;\n \n+ /**\n+ * Obtain the prefix for this prefix logger. This can be used to create a logger with the same prefix as this one.\n+ *\n+ * @return the prefix\n+ */\n public String prefix() {\n return marker.getName();\n }\n \n+ /**\n+ * Construct a prefix logger with the specified name and prefix.\n+ *\n+ * @param logger the extended logger to wrap\n+ * @param name the name of this prefix logger\n+ * @param prefix the prefix for this prefix logger\n+ */\n PrefixLogger(final ExtendedLogger logger, final String name, final String prefix) {\n super(logger, name, null);\n \n final String actualPrefix = (prefix == null ? \"\" : prefix).intern();\n final Marker actualMarker;\n // markers is not thread-safe, so we synchronize access\n synchronized (markers) {\n- final WeakReference<Marker> marker = markers.get(actualPrefix);\n- final Marker maybeMarker = marker == null ? null : marker.get();\n+ final Marker maybeMarker = markers.get(actualPrefix);\n if (maybeMarker == null) {\n actualMarker = new MarkerManager.Log4jMarker(actualPrefix);\n- markers.put(actualPrefix, new WeakReference<>(actualMarker));\n+ /*\n+ * We must create a new instance here as otherwise the marker will hold a reference to the key in the weak hash map; as\n+ * those references are held strongly, this would give a strong reference back to the key preventing them from ever being\n+ * collected. 
This also guarantees that no other strong reference can be held to the prefix anywhere.\n+ */\n+ markers.put(new String(actualPrefix), actualMarker);\n } else {\n actualMarker = maybeMarker;\n }", "filename": "core/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java", "status": "modified" }, { "diff": "@@ -47,6 +47,7 @@\n import java.util.regex.Pattern;\n \n import static org.hamcrest.Matchers.equalTo;\n+import static org.hamcrest.Matchers.lessThan;\n import static org.hamcrest.Matchers.startsWith;\n \n public class EvilLoggerTests extends ESTestCase {\n@@ -157,6 +158,20 @@ public void testPrefixLogger() throws IOException, IllegalAccessException, UserE\n }\n }\n \n+ public void testPrefixLoggerMarkersCanBeCollected() throws IOException, UserException {\n+ setupLogging(\"prefix\");\n+\n+ final int prefixes = 1 << 19; // to ensure enough markers that the GC should collect some when we force a GC below\n+ for (int i = 0; i < prefixes; i++) {\n+ // this has the side effect of caching a marker with this prefix\n+ Loggers.getLogger(\"prefix\" + i, \"prefix\" + i);\n+ }\n+\n+ // this will free the weakly referenced keys in the marker cache\n+ System.gc();\n+ assertThat(PrefixLogger.markersSize(), lessThan(prefixes));\n+ }\n+\n public void testProperties() throws IOException, UserException {\n final Settings.Builder builder = Settings.builder().put(\"cluster.name\", randomAlphaOfLength(16));\n if (randomBoolean()) {", "filename": "qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java", "status": "modified" } ] }
{ "body": "Recoveries are tracked on the target node using RecoveryTarget objects that are kept in a RecoveriesCollection. Each recovery has a unique id that is communicated from the recovery target to the source so that it can call back to the target and execute actions using the right recovery context. In case of a network disconnect, recoveries are retried. At the moment, the same recovery id is reused for the restarted recovery. This can lead to confusion though if the disconnect is unilateral and the recovery source continues with the recovery process. If the target reuses the same recovery id while doing a second attempt, there might be two concurrent recoveries running on the source for the same target.\r\n\r\nThis PR changes the recovery retry process to use a fresh recovery id. It also waits for the first recovery attempt to be fully finished (all resources locally freed) to further prevent concurrent access to the shard. Finally, in case of primary relocation, it also fails a second recovery attempt if the first attempt moved past the finalization step, as the relocation source can then be moved to RELOCATED state and start indexing as primary into the target shard (see TransportReplicationAction). Resetting the target shard in this state could mean that indexing is halted until the recovery retry attempt is completed and could also destroy existing documents indexed and acknowledged before the reset.\r\n\r\nRelates to #22043", "comments": [ { "body": "Thanks @bleskes ", "created_at": "2016-12-29T09:58:21Z" }, { "body": "I've pushed 816e1c6 which fixes an issue uncovered by a CI run:\r\nhttps://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+master+multijob-unix-compatibility/os=debian/459/consoleFull\r\n", "created_at": "2016-12-29T16:07:30Z" } ], "number": 22325, "title": "Use a fresh recovery id when retrying recoveries" }
{ "body": "#22325 changed the recovery retry logic to use unique recovery ids. The change also introduced an issue, however, which made it possible for the shard store to be closed under CancellableThreads, triggering assertions in the node locking logic. This PR limits the use of CancellableThreads only to the part where we wait on the old recovery target to be closed.\r\n\r\nTest failure: https://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+5.x+multijob-darwin-compatibility/106/consoleFull\r\n\r\nLog output:\r\n\r\n```\r\n1> [2017-01-04T14:29:36,961][WARN ][o.e.e.NodeEnvironment ] [node_sd2] lock assertion failed\r\n 1> java.nio.channels.ClosedByInterruptException: null\r\n 1> \tat java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:202) ~[?:1.8.0_60]\r\n 1> \tat sun.nio.ch.FileChannelImpl.size(FileChannelImpl.java:315) ~[?:?]\r\n 1> \tat org.apache.lucene.mockfile.FilterFileChannel.size(FilterFileChannel.java:85) ~[lucene-test-framework-6.4.0-snapshot-ec38570.jar:6.4.0-snapshot-ec38570 ec385708c6e0c47440127410c1223f14703c24e1 - jim - 2016-11-29 01:11:32]\r\n 1> \tat org.apache.lucene.mockfile.FilterFileChannel.size(FilterFileChannel.java:85) ~[lucene-test-framework-6.4.0-snapshot-ec38570.jar:6.4.0-snapshot-ec38570 ec385708c6e0c47440127410c1223f14703c24e1 - jim - 2016-11-29 01:11:32]\r\n 1> \tat org.apache.lucene.mockfile.FilterFileChannel.size(FilterFileChannel.java:85) ~[lucene-test-framework-6.4.0-snapshot-ec38570.jar:6.4.0-snapshot-ec38570 ec385708c6e0c47440127410c1223f14703c24e1 - jim - 2016-11-29 01:11:32]\r\n 1> \tat org.apache.lucene.store.NativeFSLockFactory$NativeFSLock.ensureValid(NativeFSLockFactory.java:170) ~[lucene-core-6.4.0-snapshot-ec38570.jar:6.4.0-snapshot-ec38570 ec385708c6e0c47440127410c1223f14703c24e1 - jim - 2016-11-29 01:11:32]\r\n 1> \tat org.elasticsearch.env.NodeEnvironment.assertEnvIsLocked(NodeEnvironment.java:902) ~[main/:?]\r\n 1> \tat org.elasticsearch.env.NodeEnvironment.availableShardPaths(NodeEnvironment.java:782) ~[main/:?]\r\n 1> \tat org.elasticsearch.env.NodeEnvironment.deleteShardDirectoryUnderLock(NodeEnvironment.java:492) ~[main/:?]\r\n 1> \tat org.elasticsearch.indices.IndicesService.deleteShardStore(IndicesService.java:657) ~[main/:?]\r\n 1> \tat org.elasticsearch.index.IndexService.onShardClose(IndexService.java:442) ~[main/:?]\r\n 1> \tat org.elasticsearch.index.IndexService.access$100(IndexService.java:93) ~[main/:?]\r\n 1> \tat org.elasticsearch.index.IndexService$StoreCloseListener.handle(IndexService.java:524) ~[main/:?]\r\n 1> \tat org.elasticsearch.index.IndexService$StoreCloseListener.handle(IndexService.java:509) ~[main/:?]\r\n 1> \tat org.elasticsearch.index.store.Store.closeInternal(Store.java:366) ~[main/:?]\r\n 1> \tat org.elasticsearch.index.store.Store.access$000(Store.java:126) ~[main/:?]\r\n 1> \tat org.elasticsearch.index.store.Store$1.closeInternal(Store.java:147) ~[main/:?]\r\n 1> \tat org.elasticsearch.common.util.concurrent.AbstractRefCounted.decRef(AbstractRefCounted.java:64) ~[main/:?]\r\n 1> \tat org.elasticsearch.index.store.Store.decRef(Store.java:348) ~[main/:?]\r\n 1> \tat org.elasticsearch.indices.recovery.RecoveryTarget.closeInternal(RecoveryTarget.java:330) ~[main/:?]\r\n 1> \tat org.elasticsearch.common.util.concurrent.AbstractRefCounted.decRef(AbstractRefCounted.java:64) ~[main/:?]\r\n 1> \tat org.elasticsearch.indices.recovery.RecoveryTarget.resetRecovery(RecoveryTarget.java:193) ~[main/:?]\r\n 1> \tat 
org.elasticsearch.indices.recovery.RecoveriesCollection.lambda$resetRecovery$306(RecoveriesCollection.java:113) ~[main/:?]\r\n 1> \tat org.elasticsearch.common.util.CancellableThreads.executeIO(CancellableThreads.java:105) ~[main/:?]\r\n 1> \tat org.elasticsearch.indices.recovery.RecoveriesCollection.resetRecovery(RecoveriesCollection.java:113) ~[main/:?]\r\n 1> \tat org.elasticsearch.indices.recovery.PeerRecoveryTargetService.retryRecovery(PeerRecoveryTargetService.java:156) ~[main/:?]\r\n 1> \tat org.elasticsearch.indices.recovery.PeerRecoveryTargetService.retryRecovery(PeerRecoveryTargetService.java:152) ~[main/:?]\r\n 1> \tat org.elasticsearch.indices.recovery.PeerRecoveryTargetService.doRecovery(PeerRecoveryTargetService.java:289) ~[main/:?]\r\n 1> \tat org.elasticsearch.indices.recovery.PeerRecoveryTargetService.access$900(PeerRecoveryTargetService.java:73) ~[main/:?]\r\n 1> \tat org.elasticsearch.indices.recovery.PeerRecoveryTargetService$RecoveryRunner.doRun(PeerRecoveryTargetService.java:555) ~[main/:?]\r\n 1> \tat org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:527) ~[main/:?]\r\n 1> \tat org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[main/:?]\r\n 1> \tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_60]\r\n 1> \tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_60]\r\n 1> \tat java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\r\n```", "number": 22434, "review_comments": [ { "body": "instead of the passing `activityTimeout` can't we just use the internal cancelableThreads and only execute that latch await under in a cancelable fashion? ", "created_at": "2017-01-04T22:35:46Z" }, { "body": "yes, it has to be the CancellableThreads of the new recovery target though (as only that one still has a RecoveryMonitor registered). I've pushed bc60c9 to address this", "created_at": "2017-01-05T09:28:00Z" } ], "title": "Don't close store under CancellableThreads" }
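The root cause in the stack trace above is a general NIO property: `FileChannel` is an interruptible channel, so a blocking I/O call made while the thread's interrupt status is set (and `CancellableThreads` cancels work by interrupting the running thread) closes the channel and throws `ClosedByInterruptException`. A small, self-contained illustration (the file name and byte written are arbitrary):

```java
import java.nio.ByteBuffer;
import java.nio.channels.ClosedByInterruptException;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class InterruptedChannelDemo {
    public static void main(String[] args) throws Exception {
        final Path tmp = Files.createTempFile("demo", ".bin");
        try (FileChannel channel = FileChannel.open(tmp, StandardOpenOption.WRITE)) {
            Thread.currentThread().interrupt();               // simulate a cancellation that sets the interrupt flag
            channel.write(ByteBuffer.wrap(new byte[] { 1 })); // closes the channel and throws
        } catch (ClosedByInterruptException e) {
            System.out.println("channel closed by pending interrupt: " + e);
        } finally {
            Thread.interrupted();        // clear the flag so the cleanup below is unaffected
            Files.deleteIfExists(tmp);
        }
    }
}
```

This is why the fix below keeps only the `closedLatch.await()` under `CancellableThreads`, while the reference counting that may end up closing the store and touching shard files runs outside of any cancellable section.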
{ "commits": [ { "message": "Don't close store under CancellableThreads" }, { "message": "use cancellablethreads of new target to early terminate waiting on old target to close" } ], "files": [ { "diff": "@@ -107,25 +107,17 @@ public RecoveryTarget resetRecovery(final long recoveryId, TimeValue activityTim\n }\n \n // Closes the current recovery target\n- final AtomicBoolean successfulReset = new AtomicBoolean();\n- try {\n- final RecoveryTarget finalOldRecoveryTarget = oldRecoveryTarget;\n- newRecoveryTarget.CancellableThreads().executeIO(() -> successfulReset.set(finalOldRecoveryTarget.resetRecovery()));\n- } catch (CancellableThreads.ExecutionCancelledException e) {\n- // new recovery target is already cancelled (probably due to shard closing or recovery source changing)\n- assert onGoingRecoveries.containsKey(newRecoveryTarget.recoveryId()) == false;\n- logger.trace(\"{} recovery reset cancelled, recovery from {}, id [{}], previous id [{}]\", newRecoveryTarget.shardId(),\n- newRecoveryTarget.sourceNode(), newRecoveryTarget.recoveryId(), oldRecoveryTarget.recoveryId());\n- oldRecoveryTarget.cancel(\"recovery reset cancelled\"); // if finalOldRecoveryTarget.resetRecovery did not even get to execute\n- return null;\n- }\n- if (successfulReset.get() == false) {\n- cancelRecovery(newRecoveryTarget.recoveryId(), \"failed to reset recovery\");\n- return null;\n- } else {\n+ boolean successfulReset = oldRecoveryTarget.resetRecovery(newRecoveryTarget.CancellableThreads());\n+ if (successfulReset) {\n logger.trace(\"{} restarted recovery from {}, id [{}], previous id [{}]\", newRecoveryTarget.shardId(),\n newRecoveryTarget.sourceNode(), newRecoveryTarget.recoveryId(), oldRecoveryTarget.recoveryId());\n return newRecoveryTarget;\n+ } else {\n+ logger.trace(\"{} recovery could not be reset as it is already cancelled, recovery from {}, id [{}], previous id [{}]\",\n+ newRecoveryTarget.shardId(), newRecoveryTarget.sourceNode(), newRecoveryTarget.recoveryId(),\n+ oldRecoveryTarget.recoveryId());\n+ cancelRecovery(newRecoveryTarget.recoveryId(), \"recovery cancelled during reset\");\n+ return null;\n }\n } catch (Exception e) {\n // fail shard to be safe", "filename": "core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java", "status": "modified" }, { "diff": "@@ -36,6 +36,7 @@\n import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.logging.Loggers;\n import org.elasticsearch.common.lucene.Lucene;\n+import org.elasticsearch.common.unit.TimeValue;\n import org.elasticsearch.common.util.Callback;\n import org.elasticsearch.common.util.CancellableThreads;\n import org.elasticsearch.common.util.concurrent.AbstractRefCounted;\n@@ -56,6 +57,8 @@\n import java.util.Map.Entry;\n import java.util.concurrent.ConcurrentMap;\n import java.util.concurrent.CountDownLatch;\n+import java.util.concurrent.TimeUnit;\n+import java.util.concurrent.TimeoutException;\n import java.util.concurrent.atomic.AtomicBoolean;\n import java.util.concurrent.atomic.AtomicLong;\n \n@@ -182,17 +185,21 @@ public void renameAllTempFiles() throws IOException {\n * Closes the current recovery target and waits up to a certain timeout for resources to be freed.\n * Returns true if resetting the recovery was successful, false if the recovery target is already cancelled / failed or marked as done.\n */\n- boolean resetRecovery() throws InterruptedException, IOException {\n+ boolean resetRecovery(CancellableThreads newTargetCancellableThreads) throws IOException {\n if 
(finished.compareAndSet(false, true)) {\n try {\n- // yes, this is just a logger call in a try-finally block. The reason for this is that resetRecovery is called from\n- // CancellableThreads and we have to make sure that all references to IndexShard are cleaned up before exiting this method\n logger.debug(\"reset of recovery with shard {} and id [{}]\", shardId, recoveryId);\n } finally {\n // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now.\n decRef();\n }\n- closedLatch.await();\n+ try {\n+ newTargetCancellableThreads.execute(closedLatch::await);\n+ } catch (CancellableThreads.ExecutionCancelledException e) {\n+ logger.trace(\"new recovery target cancelled for shard {} while waiting on old recovery target with id [{}] to close\",\n+ shardId, recoveryId);\n+ return false;\n+ }\n RecoveryState.Stage stage = indexShard.recoveryState().getStage();\n if (indexShard.recoveryState().getPrimary() && (stage == RecoveryState.Stage.FINALIZE || stage == RecoveryState.Stage.DONE)) {\n // once primary relocation has moved past the finalization step, the relocation source can be moved to RELOCATED state", "filename": "core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java", "status": "modified" } ] }
{ "body": "`field_stats` extracts the min/max information from the legacy GeoPoints indexed with terms. The min/max are then printed using BytesRef.utf8ToString(). These terms are not utf8 valid string and then some call to `field_stats` may produce an ArrayIndexOutOfBoundsException.\r\n\r\nFor instance this recreation fails in 2.x:\r\n\r\n````\r\nPUT t\r\n{\r\n \"mappings\": {\r\n \"t\": {\r\n \"properties\": {\r\n \"geo\": {\r\n \"type\": \"geo_point\",\r\n \"lat_lon\": true,\r\n \"geohash\": true,\r\n \"geohash_prefix\": true,\r\n \"geohash_precision\": 8\r\n }\r\n }\r\n }\r\n }\r\n}\r\n\r\nPOST t/t\r\n{\r\n \"geo\": [\r\n {\r\n \"lat\": 0.6136896834032317, \r\n \"lon\": 0.45219577457596616\r\n }\r\n ]\r\n \r\n}\r\n\r\nGET _field_stats?fields=geo\r\n````\r\n\r\nSimilarly if this index is upgraded to 5.x the call to `field_stats` will fail with the same exception.", "comments": [], "number": 22384, "title": "Terms GeoPoints may break FieldStats" }
{ "body": "Currently `geo_point` and `geo_shape` fields are treated as `text` fields by the field stats API, and we try to extract their min/max values with MultiFields.getTerms.\r\nThis is ok in master because a `geo_point` field is always a Point field, but it can cause problems in 5.x (and 2.x) because legacy `geo_point` fields are indexed as terms.\r\nAs a result the min and max are extracted and then printed in the FieldStats output using BytesRef.utf8ToString, which can throw an IndexOutOfBoundsException since these terms are not valid UTF-8 strings.\r\nThis change ensures that we never try to extract min/max information from a `geo_point` field.\r\nIt does not add a new type for geo points in the field stats API, so we'll continue to use `text` for this kind of field.\r\nThis PR is targeted at master even though we could commit this change only to 5.x; I think it's cleaner to have it in master too before we make any decision on https://github.com/elastic/elasticsearch/pull/21947.\r\n\r\nFixes #22384", "number": 22391, "review_comments": [], "title": "Implement stats for geo_point and geo_shape field" }
{ "commits": [ { "message": "Implement stats for geo_point and geo_shape field\n\nCurrently `geo_point` and `geo_shape` field are treated as `text` field by the field stats API and we\ntry to extract the min/max values with MultiFields.getTerms.\nThis is ok in master because a `geo_point` field is always a Point field but it can cause problem in 5.x (and 2.x) because the legacy\n `geo_point` are indexed as terms.\n As a result the min and max are extracted and then printed in the FieldStats output using BytesRef.utf8ToString\n which can throw an IndexOutOfBoundException since it's not valid UTF8 strings.\n This change ensure that we never try to extract min/max information from a `geo_point` field.\n It does not add a new type for geo points in the fieldstats API so we'll continue to use `text` for this kind of field.\n This PR is targeted to master even though we could only commit this change to 5.x. I think it's cleaner to have it in master too before we make any decision on\n https://github.com/elastic/elasticsearch/pull/21947.\n\nFixes #22384" } ], "files": [ { "diff": "@@ -19,14 +19,15 @@\n \n package org.elasticsearch.index.mapper;\n \n-import org.apache.lucene.document.Field;\n-import org.apache.lucene.index.IndexOptions;\n+import org.apache.lucene.index.FieldInfo;\n+import org.apache.lucene.index.IndexReader;\n import org.apache.lucene.index.IndexableField;\n import org.apache.lucene.search.Query;\n-import org.elasticsearch.common.geo.GeoHashUtils;\n import org.elasticsearch.ElasticsearchParseException;\n+import org.elasticsearch.action.fieldstats.FieldStats;\n import org.elasticsearch.common.Explicit;\n import org.elasticsearch.common.Nullable;\n+import org.elasticsearch.common.geo.GeoHashUtils;\n import org.elasticsearch.common.geo.GeoPoint;\n import org.elasticsearch.common.geo.GeoUtils;\n import org.elasticsearch.common.logging.DeprecationLogger;\n@@ -170,6 +171,20 @@ public DocValueFormat docValueFormat(@Nullable String format, DateTimeZone timeZ\n public Query termQuery(Object value, QueryShardContext context) {\n throw new QueryShardException(context, \"Geo fields do not support exact searching, use dedicated geo queries instead: [\" + name() + \"]\");\n }\n+\n+ @Override\n+ public FieldStats stats(IndexReader reader) throws IOException {\n+ int maxDoc = reader.maxDoc();\n+ FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(name());\n+ if (fi == null) {\n+ return null;\n+ }\n+ /**\n+ * we don't have a specific type for geo_point so we use an empty {@link FieldStats.Text}.\n+ * TODO: we should maybe support a new type that knows how to (de)encode the min/max information\n+ */\n+ return new FieldStats.Text(maxDoc, -1, -1, -1, isSearchable(), isAggregatable());\n+ }\n }\n \n protected Explicit<Boolean> ignoreMalformed;", "filename": "core/src/main/java/org/elasticsearch/index/mapper/BaseGeoPointFieldMapper.java", "status": "modified" }, { "diff": "@@ -18,12 +18,11 @@\n */\n package org.elasticsearch.index.mapper;\n \n-import org.apache.lucene.index.IndexableField;\n-import org.locationtech.spatial4j.shape.Point;\n-import org.locationtech.spatial4j.shape.Shape;\n-import org.locationtech.spatial4j.shape.jts.JtsGeometry;\n import org.apache.lucene.document.Field;\n+import org.apache.lucene.index.FieldInfo;\n import org.apache.lucene.index.IndexOptions;\n+import org.apache.lucene.index.IndexReader;\n+import org.apache.lucene.index.IndexableField;\n import org.apache.lucene.search.Query;\n import 
org.apache.lucene.spatial.prefix.PrefixTreeStrategy;\n import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;\n@@ -33,8 +32,8 @@\n import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;\n import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;\n import org.elasticsearch.Version;\n+import org.elasticsearch.action.fieldstats.FieldStats;\n import org.elasticsearch.common.Explicit;\n-import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.geo.GeoUtils;\n import org.elasticsearch.common.geo.SpatialStrategy;\n import org.elasticsearch.common.geo.builders.ShapeBuilder;\n@@ -45,6 +44,9 @@\n import org.elasticsearch.common.xcontent.support.XContentMapValues;\n import org.elasticsearch.index.query.QueryShardContext;\n import org.elasticsearch.index.query.QueryShardException;\n+import org.locationtech.spatial4j.shape.Point;\n+import org.locationtech.spatial4j.shape.Shape;\n+import org.locationtech.spatial4j.shape.jts.JtsGeometry;\n \n import java.io.IOException;\n import java.util.Iterator;\n@@ -415,6 +417,20 @@ public PrefixTreeStrategy resolveStrategy(String strategyName) {\n public Query termQuery(Object value, QueryShardContext context) {\n throw new QueryShardException(context, \"Geo fields do not support exact searching, use dedicated geo queries instead\");\n }\n+\n+ @Override\n+ public FieldStats stats(IndexReader reader) throws IOException {\n+ int maxDoc = reader.maxDoc();\n+ FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(name());\n+ if (fi == null) {\n+ return null;\n+ }\n+ /**\n+ * we don't have a specific type for geo_shape so we use an empty {@link FieldStats.Text}.\n+ * TODO: we should maybe support a new type that knows how to (de)encode the min/max information\n+ */\n+ return new FieldStats.Text(maxDoc, -1, -1, -1, isSearchable(), isAggregatable());\n+ }\n }\n \n protected Explicit<Boolean> coerce;", "filename": "core/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java", "status": "modified" }, { "diff": "@@ -12,6 +12,12 @@ setup:\n type: long\n bar:\n type: long\n+ geo:\n+ type: geo_point\n+ geo_shape:\n+ type: geo_shape\n+ tree: quadtree\n+ precision: 1m\n \n - do:\n indices.create:\n@@ -26,20 +32,26 @@ setup:\n type: long\n bar:\n type: text\n+ geo:\n+ type: geo_point\n+ geo_shape:\n+ type: geo_shape\n+ tree: quadtree\n+ precision: 1m\n \n - do:\n index:\n index: test_1\n type: test\n id: id_1\n- body: { foo: \"bar\", number: 123, bar: 123 }\n+ body: { foo: \"bar\", number: 123, bar: 123, geo: { lat: 48.858093, lon: 2.294694} }\n \n - do:\n index:\n index: test_2\n type: test\n id: id_10\n- body: { foo: \"babar\", number: 456, bar: \"123\" }\n+ body: { foo: \"babar\", number: 456, bar: \"123\", geo_shape: {type: \"linestring\", coordinates : [[-77.03653, 38.897676], [-77.009051, 38.889939]] } }\n \n - do:\n indices.refresh: {}\n@@ -48,7 +60,7 @@ setup:\n \"Basic field stats\":\n - do:\n field_stats:\n- fields: [foo, number]\n+ fields: [foo, number, geo, geo_shape]\n \n - match: { indices._all.fields.foo.max_doc: 2 }\n - match: { indices._all.fields.foo.doc_count: 2 }\n@@ -68,13 +80,54 @@ setup:\n - match: { indices._all.fields.number.max_value: 456 }\n - match: { indices._all.fields.number.max_value_as_string: \"456\" }\n - match: { indices._all.fields.number.type: \"integer\" }\n+ - match: { indices._all.fields.geo.type: \"string\" }\n+ - match: { indices._all.fields.geo.max_doc: 1 }\n+ - match: { indices._all.fields.geo.doc_count: -1 }\n+ - match: { 
indices._all.fields.geo.searchable: true }\n+ - match: { indices._all.fields.geo.aggregatable: true }\n+ - match: { indices._all.fields.geo_shape.type: \"string\" }\n+ - match: { indices._all.fields.geo_shape.max_doc: 1 }\n+ - match: { indices._all.fields.geo_shape.searchable: true }\n+ - match: { indices._all.fields.geo_shape.aggregatable: false }\n+\n - is_false: conflicts\n \n+---\n+\"Geopoint field stats\":\n+ - skip:\n+ version: \" - 5.2.0\"\n+ reason: geo_point fields don't return min/max for versions greater than 5.2.0\n+\n+ - do:\n+ field_stats:\n+ fields: [geo, geo_shape]\n+\n+ - match: { indices._all.fields.geo.type: \"string\" }\n+ - match: { indices._all.fields.geo.max_doc: 1 }\n+ - match: { indices._all.fields.geo.doc_count: -1 }\n+ - match: { indices._all.fields.geo.searchable: true }\n+ - match: { indices._all.fields.geo.aggregatable: true }\n+ - is_false: indices._all.fields.geo.min_value\n+ - is_false: indices._all.fields.geo.max_value\n+ - is_false: indices._all.fields.geo.min_value_as_string\n+ - is_false: indices._all.fields.geo.max_value_as_string\n+ - match: { indices._all.fields.geo_shape.type: \"string\" }\n+ - match: { indices._all.fields.geo_shape.max_doc: 1 }\n+ - match: { indices._all.fields.geo_shape.doc_count: -1 }\n+ - match: { indices._all.fields.geo_shape.searchable: true }\n+ - match: { indices._all.fields.geo_shape.aggregatable: false }\n+ - is_false: indices._all.fields.geo_shape.min_value\n+ - is_false: indices._all.fields.geo_shape.max_value\n+ - is_false: indices._all.fields.geo_shape.min_value_as_string\n+ - is_false: indices._all.fields.geo_shape.max_value_as_string\n+ - is_false: conflicts\n+\n+\n ---\n \"Basic field stats with level set to indices\":\n - do:\n field_stats:\n- fields: [foo, number]\n+ fields: [foo, number, geo, geo_shape]\n level: indices\n \n - match: { indices.test_1.fields.foo.max_doc: 1 }\n@@ -95,6 +148,10 @@ setup:\n - match: { indices.test_1.fields.number.max_value: 123 }\n - match: { indices.test_1.fields.number.max_value_as_string: \"123\" }\n - match: { indices.test_1.fields.number.type: \"integer\" }\n+ - match: { indices.test_1.fields.geo.type: \"string\" }\n+ - match: { indices.test_1.fields.geo.max_doc: 1 }\n+ - match: { indices.test_1.fields.geo.searchable: true }\n+ - match: { indices.test_1.fields.geo.aggregatable: true }\n - match: { indices.test_2.fields.foo.max_doc: 1 }\n - match: { indices.test_2.fields.foo.doc_count: 1 }\n - match: { indices.test_2.fields.foo.min_value: \"babar\" }\n@@ -114,6 +171,45 @@ setup:\n - match: { indices.test_2.fields.number.max_value: 456 }\n - match: { indices.test_2.fields.number.max_value_as_string: \"456\" }\n - match: { indices.test_2.fields.number.type: \"integer\" }\n+ - match: { indices.test_2.fields.geo_shape.type: \"string\" }\n+ - match: { indices.test_2.fields.geo_shape.max_doc: 1 }\n+ - match: { indices.test_2.fields.geo_shape.searchable: true }\n+ - match: { indices.test_2.fields.geo_shape.aggregatable: false }\n+ - is_false: indices.test_2.fields.geo\n+ - is_false: conflicts\n+\n+\n+---\n+\"Geopoint field stats with level set to indices\":\n+ - skip:\n+ version: \" - 5.2.0\"\n+ reason: geo_point fields don't return min/max for versions greater than 5.2.0\n+\n+ - do:\n+ field_stats:\n+ fields: [geo, geo_shape]\n+ level: indices\n+\n+ - match: { indices.test_1.fields.geo.max_doc: 1 }\n+ - match: { indices.test_1.fields.geo.doc_count: -1 }\n+ - is_false: indices.test_1.fields.geo.min_value\n+ - is_false: indices.test_1.fields.geo.max_value\n+ - is_false: 
indices.test_1.fields.geo.min_value_as_string\n+ - is_false: indices.test_1.fields.geo.max_value_as_string\n+ - match: { indices.test_1.fields.geo.searchable: true }\n+ - match: { indices.test_1.fields.geo.aggregatable: true }\n+ - match: { indices.test_1.fields.geo.type: \"string\" }\n+ - is_false: indices.test_2.fields.geo\n+ - match: { indices.test_2.fields.geo_shape.max_doc: 1 }\n+ - match: { indices.test_2.fields.geo_shape.doc_count: -1 }\n+ - is_false: indices.test_2.fields.geo_shape.min_value\n+ - is_false: indices.test_2.fields.geo_shape.max_value\n+ - is_false: indices.test_2.fields.geo_shape.min_value_as_string\n+ - is_false: indices.test_2.fields.geo_shape.max_value_as_string\n+ - match: { indices.test_2.fields.geo_shape.searchable: true }\n+ - match: { indices.test_2.fields.geo_shape.aggregatable: false }\n+ - match: { indices.test_2.fields.geo_shape.type: \"string\" }\n+ - is_false: indices.test_2.fields.geo\n - is_false: conflicts\n \n ---", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/field_stats/10_basics.yaml", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: 5.1.1\r\n\r\n**Plugins installed**:\r\n```javascript\r\n[{\r\n \"name\" : \"x-pack\",\r\n \"version\" : \"5.1.1\",\r\n \"description\" : \"Elasticsearch Expanded Pack Plugin\",\r\n \"classname\" : \"org.elasticsearch.xpack.XPackPlugin\"\r\n}]\r\n```\r\n\r\n**JVM version**: 1.8.0_111\r\n\r\n**OS version**: Ubuntu 16.04 (Linux amd64 4.4.0-57-generic)\r\n\r\n**Description of the problem including expected versus actual behavior**:\r\n\r\nAttempting to index a percolate query that uses `gte` or `gt` in a range query against a field of type `date` fails with the following error:\r\n\r\n```javascript\r\n{\r\n \"error\": {\r\n \"root_cause\": [\r\n {\r\n \"type\": \"mapper_parsing_exception\",\r\n \"reason\": \"failed to parse\"\r\n }\r\n ],\r\n \"type\": \"mapper_parsing_exception\",\r\n \"reason\": \"failed to parse\",\r\n \"caused_by\": {\r\n \"type\": \"null_pointer_exception\",\r\n \"reason\": null\r\n }\r\n },\r\n \"status\": 400\r\n}\r\n```\r\n\r\nRange queries using `gte` or `gt` fail to index; however both `lte` and `lt` work properly. Note that the queries listed below are using fixed dates. Thus this note regarding breaking changes in ES 5.0 **DOES NOT APPLY**:\r\n\r\n> The percolator no longer accepts percolator queries containing range queries with ranges that are based on current time (using now).\r\n\r\nIf it helps to know, our setup is a new cluster installed from scratch on ES 5.1.1. There was no index upgrade done from a previous version, nor was any of the data copied. Fresh install, new mapping definitions and data inserts as per the example below.\r\n\r\n**Steps to reproduce**:\r\n 1. Create a new index (named `searches`) with the type mapping for percolate pre-processing (named `item`, with a date field named `created`), as well as the type mapping for storing the percolate queries themselves (named `search`, with the percolate field named `query`).\r\n\r\n```javascript\r\nPUT searches\r\n{\r\n \"mappings\": {\r\n \"item\": {\r\n \"properties\": {\r\n \"created\": {\r\n \"type\": \"date\"\r\n }\r\n }\r\n },\r\n \"search\": {\r\n \"properties\": {\r\n \"query\": {\r\n \"type\": \"percolator\"\r\n }\r\n }\r\n }\r\n }\r\n}\r\n```\r\n\r\n 2. Attempt to index the following queries. The only difference is the range operator used (`lt`, `lte`, `gt`, and `gte`). The first two (`lte` and `lt`) will index, while the other two (`gte` and `gt`) throw the `null_pointer_exception` error.\r\n\r\n```javascript\r\nPUT searches/search/1\r\n{ \"query\": { \"range\": { \"created\": { \"lte\": \"2016-01-01\" } } } }\r\n```\r\n\r\n```javascript\r\nPUT searches/search/2\r\n{ \"query\": { \"range\": { \"created\": { \"lt\": \"2016-01-01\" } } } }\r\n```\r\n\r\n```javascript\r\nPUT searches/search/3\r\n{ \"query\": { \"range\": { \"created\": { \"gte\": \"2016-01-01\" } } } }\r\n```\r\n\r\n```javascript\r\nPUT searches/search/4\r\n{ \"query\": { \"range\": { \"created\": { \"gt\": \"2016-01-01\" } } } }\r\n```", "comments": [ { "body": "@reol-nbessette Thanks for reporting, this is a bug in the now range validation for percolator queries containing range queries.", "created_at": "2016-12-27T20:12:59Z" }, { "body": "@martijnvg Now that's a fast debug session and commit. Thank you so much!\r\n\r\nShould someone else stumble across this issue before the next patch release, here's a client-side workaround if you're not wanting to patch the server directly. 
The bug is that when a \"from\" date (via `gt` or `gte`) is specified, a \"to\" date (via `lt` or `lte`) is erroneously expected as well. We can work around it by attaching an `lte` range check with a large value exceeding all your real date values. The year 9999 seems to work even with strict date formats. Thus, as an example:\r\n\r\n```javascript\r\n{\r\n \"query\": {\r\n \"range\": {\r\n \"created\": {\r\n \"gte\": \"2016-01-01\"\r\n }\r\n }\r\n }\r\n}\r\n```\r\n\r\ncan be worked around with:\r\n\r\n```javascript\r\n{\r\n \"query\": {\r\n \"range\": {\r\n \"created\": {\r\n \"gte\": \"2016-01-01\",\r\n \"lte\": \"9999-12-31\"\r\n }\r\n }\r\n }\r\n}\r\n```", "created_at": "2016-12-27T20:42:27Z" } ], "number": 22355, "title": "percolator: null_pointer_exception when storing query with date range query using \"gt\" or \"gte\" operator" }
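The NPE comes from the validation itself: the old check cast both bounds to `String` and called `contains("now")` on them, so a range with only `gt`/`gte` left the upper bound `null` and the check blew up before the query was ever stored. Below is a minimal, stand-alone sketch of the null-safe shape of that check, with a simplified signature; it is not the actual `PercolatorFieldMapper.verifyQuery` method changed in the PR that follows.

```java
public class NowRangeCheck {

    // Each bound is validated on its own; a missing (null) or non-string bound is simply skipped,
    // which is the shape of the null-safe check the PR applies per range bound.
    static void rejectNowBasedBound(Object bound) {
        if (bound instanceof String && ((String) bound).contains("now")) {
            throw new IllegalArgumentException(
                "percolator queries containing time range queries based on the current time is unsupported");
        }
    }

    public static void main(String[] args) {
        rejectNowBasedBound("2016-01-01");   // fixed date: accepted
        rejectNowBasedBound(null);           // only one bound given, as in {"gte": "2016-01-01"}: no NPE
        try {
            rejectNowBasedBound("now-1d");   // 'now'-relative bound: rejected
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
```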
{ "body": "Fixes #22355", "number": 22356, "review_comments": [], "title": "Fix NPE in percolator's 'now' range check for percolator queries with range queries" }
{ "commits": [ { "message": "percolator: Fix NPE in percolator's 'now' range check for percolator queries with range queries.\n\nCloses #22355" } ], "files": [ { "diff": "@@ -377,12 +377,18 @@ static void verifyQuery(QueryBuilder queryBuilder) {\n RangeQueryBuilder rangeQueryBuilder = (RangeQueryBuilder) queryBuilder;\n if (rangeQueryBuilder.from() instanceof String) {\n String from = (String) rangeQueryBuilder.from();\n- String to = (String) rangeQueryBuilder.to();\n- if (from.contains(\"now\") || to.contains(\"now\")) {\n+ if (from.contains(\"now\")) {\n throw new IllegalArgumentException(\"percolator queries containing time range queries based on the \" +\n \"current time is unsupported\");\n }\n }\n+ if (rangeQueryBuilder.to() instanceof String) {\n+ String to = (String) rangeQueryBuilder.to();\n+ if (to.contains(\"now\")) {\n+ throw new IllegalArgumentException(\"percolator queries containing time range queries based on the \" +\n+ \"current time is unsupported\");\n+ }\n+ }\n } else if (queryBuilder instanceof HasChildQueryBuilder) {\n throw new IllegalArgumentException(\"the [has_child] query is unsupported inside a percolator query\");\n } else if (queryBuilder instanceof HasParentQueryBuilder) {", "filename": "modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java", "status": "modified" }, { "diff": "@@ -442,6 +442,53 @@ public void testRangeQueryWithNowRangeIsForbidden() throws Exception {\n }\n );\n assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));\n+ e = expectThrows(MapperParsingException.class, () -> {\n+ mapperService.documentMapper(typeName).parse(\"test\", typeName, \"1\",\n+ jsonBuilder().startObject()\n+ .field(fieldName, rangeQuery(\"date_field\").from(\"now\"))\n+ .endObject().bytes());\n+ }\n+ );\n+ assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));\n+ e = expectThrows(MapperParsingException.class, () -> {\n+ mapperService.documentMapper(typeName).parse(\"test\", typeName, \"1\",\n+ jsonBuilder().startObject()\n+ .field(fieldName, rangeQuery(\"date_field\").to(\"now\"))\n+ .endObject().bytes());\n+ }\n+ );\n+ assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));\n+ }\n+\n+ // https://github.com/elastic/elasticsearch/issues/22355\n+ public void testVerifyRangeQueryWithNullBounds() throws Exception {\n+ addQueryMapping();\n+ MapperParsingException e = expectThrows(MapperParsingException.class, () -> {\n+ mapperService.documentMapper(typeName).parse(\"test\", typeName, \"1\",\n+ jsonBuilder().startObject()\n+ .field(fieldName, rangeQuery(\"date_field\").from(\"now\").to(null))\n+ .endObject().bytes());\n+ }\n+ );\n+ assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));\n+ e = expectThrows(MapperParsingException.class, () -> {\n+ mapperService.documentMapper(typeName).parse(\"test\", typeName, \"1\",\n+ jsonBuilder().startObject()\n+ .field(fieldName, rangeQuery(\"date_field\").from(null).to(\"now\"))\n+ .endObject().bytes());\n+ }\n+ );\n+ assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));\n+\n+ // No validation failures:\n+ mapperService.documentMapper(typeName).parse(\"test\", typeName, \"1\",\n+ jsonBuilder().startObject()\n+ .field(fieldName, rangeQuery(\"date_field\").from(\"2016-01-01\").to(null))\n+ .endObject().bytes());\n+ mapperService.documentMapper(typeName).parse(\"test\", typeName, \"1\",\n+ jsonBuilder().startObject()\n+ .field(fieldName, rangeQuery(\"date_field\").from(null).to(\"2016-01-01\"))\n+ .endObject().bytes());\n }\n \n 
public void testUnsupportedQueries() {", "filename": "modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java", "status": "modified" } ] }
{ "body": "Currently `docvalues_fields` return the values of the fields as they are stored\r\nin doc values. I don't like that it exposes implementation details, but there\r\nare also user-facing issues like the fact that it cannot work with binary fields.\r\nThis change will also make it easier for users to reindex if they do not store\r\nthe source, since `docvalues_fields` will return data in such a format that it\r\ncan be put in an indexing request with the same mappings.\r\n\r\nThe hard part of the change is backward compatibility, since it is breaking.\r\nThe approach taken here is that 5.x will keep exposing the internal\r\nrepresentation, with a special format name called `use_field_format` which\r\nwill format the field depending on how it is mapped. This will become the\r\ndefault in 6.0, and this hardcoded format name will be removed in 7.0 to ease\r\nthe transition from 5.x to 6.x.", "comments": [ { "body": "+1 as well. I think it's useful for dates; it also fixes problems we can have with binary doc_values. Not sure about the name of the BWC param though: in the code you use `use_field_format` and in the docs you use `use_field_defaults`. What about `use_field_mapping`? ", "created_at": "2016-12-14T16:37:53Z" }, { "body": "Woooooops, good catch. I like `use_field_mapping` too; I'll wait for this to be discussed in Fixit Friday to see what others think about it.", "created_at": "2016-12-14T17:04:49Z" }, { "body": "I'm removing the discuss label since we agreed to do this in FixitFriday. I also opened another PR for 5.x to better show the bw compat layer: #22354.", "created_at": "2016-12-27T16:35:18Z" }, { "body": "@jpountz Should this PR (and the related 5.x PR) be brought up to date and merged?", "created_at": "2017-06-09T06:51:59Z" }, { "body": "@jpountz ping on this issue again for updating and merging (or closing if it's not needed any more)", "created_at": "2017-08-15T20:46:34Z" }, { "body": "this PR needs a sync with Kibana in order to be merged, so I'll do it after 6.0 is out", "created_at": "2017-08-16T07:46:15Z" } ], "number": 22146, "title": "Format doc values fields." }
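The idea in the issue above is that doc values hold an internal representation (epoch milliseconds for dates, raw bytes for binary fields) and the field's mapped format should decide how that value is rendered in the response. The sketch below illustrates the concept in plain Java; the `Formatter` interface, its constants, and the sample values are invented for this example and are not Elasticsearch's `DocValueFormat` API.

```java
import java.time.Instant;
import java.time.format.DateTimeFormatter;
import java.util.Base64;

public class DocValueFormattingDemo {

    /** How a stored doc value is rendered back to the client; modelled loosely on the idea in #22146. */
    interface Formatter {
        Object formatLong(long value);
        Object formatBytes(byte[] value);
    }

    /** "Internal representation" passthrough: what docvalue_fields returned before the change. */
    static final Formatter RAW = new Formatter() {
        public Object formatLong(long value) { return value; }
        public Object formatBytes(byte[] value) { throw new UnsupportedOperationException(); }
    };

    /** Date fields: doc values hold epoch millis, the mapping's date format decides the rendering. */
    static final Formatter DATE = new Formatter() {
        final DateTimeFormatter fmt = DateTimeFormatter.ISO_INSTANT;
        public Object formatLong(long value) { return fmt.format(Instant.ofEpochMilli(value)); }
        public Object formatBytes(byte[] value) { throw new UnsupportedOperationException(); }
    };

    /** Binary fields: doc values hold raw bytes, which are only safe to return as base64 text. */
    static final Formatter BINARY = new Formatter() {
        public Object formatLong(long value) { throw new UnsupportedOperationException(); }
        public Object formatBytes(byte[] value) { return Base64.getEncoder().encodeToString(value); }
    };

    public static void main(String[] args) {
        long millis = 1482796800000L;
        System.out.println(RAW.formatLong(millis));    // 1482796800000 (old behaviour)
        System.out.println(DATE.formatLong(millis));   // 2016-12-27T00:00:00Z (mapping-driven rendering)
        System.out.println(BINARY.formatBytes(new byte[] { 1, 2, 3 }));  // AQID
    }
}
```

The backward-compatibility plan discussed in the comments is to keep the raw passthrough as the 5.x default and let a special format name (`use_field_format`, later suggested as `use_field_mapping`) opt a field into the mapping-driven rendering.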
{ "body": "Backport of #22146\r\n\r\nNote to reviewers: the diff between this change and #22146 is contained in the 2nd commit.", "number": 22354, "review_comments": [], "title": "Format doc_value fields" }
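For reference, the diff that follows adds a two-argument `addDocValueField(String name, String format)` overload to `SearchRequestBuilder` (mirrored by `docValueField(name, format)` on the source builder and a `Field(name, format)` pair in `DocValueFieldsContext`). A hedged usage sketch: the index name, field names, and the date pattern are assumptions for illustration, and omitting the format (or passing `null`) keeps the default rendering.

```java
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilders;

public class DocValueFieldUsage {

    // Fetch two fields from doc values: one with an explicit format, one relying on the defaults.
    static SearchResponse search(Client client) {
        return client.prepareSearch("logs")                   // index name is illustrative
                .setQuery(QueryBuilders.matchAllQuery())
                .addDocValueField("timestamp", "yyyy-MM-dd")  // explicit format for the returned value
                .addDocValueField("status")                   // no format: default rendering
                .get();
    }
}
```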
{ "commits": [ { "message": "Format doc values fields.\n\nCurrently `docvalues_fields` return the values of the fields as they are stored\nin doc values. I don't like that it exposes implementation details, but there\nare also user-facing issues like the fact it cannot work with binary fields.\nThis change will also make it easier for users to reindex if they do not store\nthe source, since `docvalues_fields` will return data is such a format that it\ncan be put in an indexing request with the same mappings.\n\nThe hard part of the change is backward compatibility, since it is breaking.\nThe approach taken here is that 5.x will keep exposing the internal\nrepresentation, with a special format name called `use_field_format` which\nwill format the field depending on how it is mapped. This will become the\ndefault in 6.0, and this hardcoded format name will be removed in 7.0 to ease\nthe transition from 5.x to 6.x." }, { "message": "BW compat." }, { "message": "Add doc about `use_field_mapping`." }, { "message": "Fix UOE." }, { "message": "iter" }, { "message": "Add skip." }, { "message": "iter" } ], "files": [ { "diff": "@@ -129,7 +129,7 @@ private SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder optio\n }\n }\n if (options.getDocValueFields() != null) {\n- options.getDocValueFields().forEach(groupSource::docValueField);\n+ options.getDocValueFields().forEach(field -> groupSource.docValueField(field.getName(), field.getFormat()));\n }\n if (options.getStoredFieldsContext() != null && options.getStoredFieldsContext().fieldNames() != null) {\n options.getStoredFieldsContext().fieldNames().forEach(groupSource::storedField);", "filename": "core/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java", "status": "modified" }, { "diff": "@@ -284,7 +284,7 @@ public SearchRequestBuilder setFetchSource(@Nullable String[] includes, @Nullabl\n \n /**\n * Adds a docvalue based field to load and return. The field does not have to be stored,\n- * but its recommended to use non analyzed or numeric fields.\n+ * but its recommended to use non analyzed fields.\n *\n * @param name The field to get from the docvalue\n */\n@@ -293,6 +293,18 @@ public SearchRequestBuilder addDocValueField(String name) {\n return this;\n }\n \n+ /**\n+ * Adds a docvalue based field to load and return. 
The field does not have to be stored,\n+ * but its recommended to use non analyzed fields.\n+ *\n+ * @param name The field to get from the docvalue\n+ * @param format How to format the field, {@code null} to use defaults.\n+ */\n+ public SearchRequestBuilder addDocValueField(String name, String format) {\n+ sourceBuilder().docValueField(name, format);\n+ return this;\n+ }\n+\n /**\n * Adds a stored field to load and return (note, it must be stored) as part of the search request.\n */", "filename": "core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java", "status": "modified" }, { "diff": "@@ -36,6 +36,8 @@\n import org.elasticsearch.index.fielddata.plain.BytesBinaryDVIndexFieldData;\n import org.elasticsearch.index.query.QueryShardContext;\n import org.elasticsearch.index.query.QueryShardException;\n+import org.elasticsearch.search.DocValueFormat;\n+import org.joda.time.DateTimeZone;\n \n import java.io.IOException;\n import java.util.Base64;\n@@ -123,6 +125,11 @@ public BytesReference valueForDisplay(Object value) {\n return bytes;\n }\n \n+ @Override\n+ public DocValueFormat docValueFormat(String format, DateTimeZone timeZone) {\n+ return DocValueFormat.BINARY;\n+ }\n+\n @Override\n public IndexFieldData.Builder fielddataBuilder() {\n failIfNoDocValues();", "filename": "core/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java", "status": "modified" }, { "diff": "@@ -20,6 +20,7 @@\n \n import org.elasticsearch.Version;\n import org.elasticsearch.action.support.ToXContentToBytes;\n+import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.ParseField;\n import org.elasticsearch.common.ParsingException;\n import org.elasticsearch.common.io.stream.StreamInput;\n@@ -54,6 +55,7 @@\n import java.util.Objects;\n import java.util.Optional;\n import java.util.Set;\n+import java.util.stream.Collectors;\n \n import static org.elasticsearch.common.xcontent.XContentParser.Token.END_OBJECT;\n \n@@ -81,7 +83,9 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl\n SearchSourceBuilder.STORED_FIELDS_FIELD + \"] to retrieve stored fields or _source filtering \" +\n \"if the field is not stored\");\n }, SearchSourceBuilder.FIELDS_FIELD, ObjectParser.ValueType.STRING_ARRAY);\n- PARSER.declareStringArray(InnerHitBuilder::setDocValueFields, SearchSourceBuilder.DOCVALUE_FIELDS_FIELD);\n+ PARSER.declareObjectArray(InnerHitBuilder::setDocValueFields,\n+ DocValueFieldsContext.Field::fromXContent,\n+ DocValueFieldsContext.DOCVALUE_FIELDS_FIELD);\n PARSER.declareField((p, i, c) -> {\n try {\n Set<ScriptField> scriptFields = new HashSet<>();\n@@ -144,7 +148,7 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl\n private StoredFieldsContext storedFieldsContext;\n private QueryBuilder query = DEFAULT_INNER_HIT_QUERY;\n private List<SortBuilder<?>> sorts;\n- private List<String> docValueFields;\n+ private List<DocValueFieldsContext.Field> docValueFields;\n private Set<ScriptField> scriptFields;\n private HighlightBuilder highlightBuilder;\n private FetchSourceContext fetchSourceContext;\n@@ -221,7 +225,21 @@ public InnerHitBuilder(StreamInput in) throws IOException {\n version = in.readBoolean();\n trackScores = in.readBoolean();\n storedFieldsContext = in.readOptionalWriteable(StoredFieldsContext::new);\n- docValueFields = (List<String>) in.readGenericValue();\n+ if (in.getVersion().before(Version.V_5_5_0_UNRELEASED)) {\n+ List<String> fieldNameList = (List<String>) in.readGenericValue();\n+ if 
(fieldNameList == null) {\n+ docValueFields = null;\n+ } else {\n+ docValueFields = new ArrayList<>();\n+ for (String name : fieldNameList) {\n+ docValueFields.add(new DocValueFieldsContext.Field(name, null));\n+ }\n+ }\n+ } else if (in.readBoolean()) {\n+ docValueFields = in.readList(DocValueFieldsContext.Field::new);\n+ } else {\n+ docValueFields = null;\n+ }\n if (in.readBoolean()) {\n int size = in.readVInt();\n scriptFields = new HashSet<>(size);\n@@ -262,7 +280,15 @@ public void writeTo(StreamOutput out) throws IOException {\n out.writeBoolean(version);\n out.writeBoolean(trackScores);\n out.writeOptionalWriteable(storedFieldsContext);\n- out.writeGenericValue(docValueFields);\n+ if (out.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {\n+ out.writeBoolean(docValueFields != null);\n+ if (docValueFields != null) {\n+ out.writeList(docValueFields);\n+ }\n+ } else {\n+ out.writeGenericValue(docValueFields == null ? null : docValueFields.stream().map(\n+ DocValueFieldsContext.Field::getName).collect(Collectors.toList()));\n+ }\n boolean hasScriptFields = scriptFields != null;\n out.writeBoolean(hasScriptFields);\n if (hasScriptFields) {\n@@ -413,7 +439,7 @@ public InnerHitBuilder setStoredFieldNames(List<String> fieldNames) {\n * @deprecated Use {@link InnerHitBuilder#getDocValueFields()} instead.\n */\n @Deprecated\n- public List<String> getFieldDataFields() {\n+ public List<DocValueFieldsContext.Field> getFieldDataFields() {\n return docValueFields;\n }\n \n@@ -423,7 +449,7 @@ public List<String> getFieldDataFields() {\n * @deprecated Use {@link InnerHitBuilder#setDocValueFields(List)} instead.\n */\n @Deprecated\n- public InnerHitBuilder setFieldDataFields(List<String> fieldDataFields) {\n+ public InnerHitBuilder setFieldDataFields(List<DocValueFieldsContext.Field> fieldDataFields) {\n this.docValueFields = fieldDataFields;\n return this;\n }\n@@ -435,24 +461,20 @@ public InnerHitBuilder setFieldDataFields(List<String> fieldDataFields) {\n */\n @Deprecated\n public InnerHitBuilder addFieldDataField(String field) {\n- if (docValueFields == null) {\n- docValueFields = new ArrayList<>();\n- }\n- docValueFields.add(field);\n- return this;\n+ return addDocValueField(field);\n }\n \n /**\n * Gets the docvalue fields.\n */\n- public List<String> getDocValueFields() {\n+ public List<DocValueFieldsContext.Field> getDocValueFields() {\n return docValueFields;\n }\n \n /**\n * Sets the stored fields to load from the docvalue and return.\n */\n- public InnerHitBuilder setDocValueFields(List<String> docValueFields) {\n+ public InnerHitBuilder setDocValueFields(List<DocValueFieldsContext.Field> docValueFields) {\n this.docValueFields = docValueFields;\n return this;\n }\n@@ -461,10 +483,19 @@ public InnerHitBuilder setDocValueFields(List<String> docValueFields) {\n * Adds a field to load from the docvalue and return.\n */\n public InnerHitBuilder addDocValueField(String field) {\n+ return addDocValueField(field, null);\n+ }\n+\n+ /**\n+ * Adds a field to load from the docvalue and return.\n+ * @param name name of the field\n+ * @param format how to format the field, or {@code null} to use the defaults\n+ */\n+ public InnerHitBuilder addDocValueField(String name, @Nullable String format) {\n if (docValueFields == null) {\n docValueFields = new ArrayList<>();\n }\n- docValueFields.add(field);\n+ docValueFields.add(new DocValueFieldsContext.Field(name, format));\n return this;\n }\n \n@@ -673,9 +704,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n 
storedFieldsContext.toXContent(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), builder);\n }\n if (docValueFields != null) {\n- builder.startArray(SearchSourceBuilder.DOCVALUE_FIELDS_FIELD.getPreferredName());\n- for (String fieldDataField : docValueFields) {\n- builder.value(fieldDataField);\n+ builder.startArray(DocValueFieldsContext.DOCVALUE_FIELDS_FIELD.getPreferredName());\n+ for (DocValueFieldsContext.Field fieldDataField : docValueFields) {\n+ fieldDataField.toXContent(builder);\n }\n builder.endArray();\n }", "filename": "core/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java", "status": "modified" }, { "diff": "@@ -30,6 +30,8 @@\n import org.elasticsearch.common.joda.Joda;\n import org.elasticsearch.common.network.InetAddresses;\n import org.elasticsearch.common.network.NetworkAddress;\n+import org.elasticsearch.common.xcontent.XContentBuilder;\n+import org.elasticsearch.common.xcontent.XContentParser;\n import org.joda.time.DateTimeZone;\n \n import java.io.IOException;\n@@ -39,39 +41,40 @@\n import java.text.NumberFormat;\n import java.text.ParseException;\n import java.util.Arrays;\n+import java.util.Base64;\n import java.util.Locale;\n import java.util.Objects;\n import java.util.function.LongSupplier;\n \n /** A formatter for values as returned by the fielddata/doc-values APIs. */\n public interface DocValueFormat extends NamedWriteable {\n \n- /** Format a long value. This is used by terms and histogram aggregations\n- * to format keys for fields that use longs as a doc value representation\n- * such as the {@code long} and {@code date} fields. */\n- String format(long value);\n+ /** Format a long value. This is used to convert the internal representation\n+ * that is used by doc values back to the original type of the object.\n+ * The return value must be supported by {@link XContentBuilder#value(Object)}. */\n+ Object format(long value);\n \n- /** Format a double value. This is used by terms and stats aggregations\n- * to format keys for fields that use numbers as a doc value representation\n- * such as the {@code long}, {@code double} or {@code date} fields. */\n- String format(double value);\n+ /** Format a double value. This is used to convert the internal representation\n+ * that is used by doc values back to the original type of the object.\n+ * The return value must be supported by {@link XContentBuilder#value(Object)}. */\n+ Object format(double value);\n \n- /** Format a double value. This is used by terms aggregations to format\n- * keys for fields that use binary doc value representations such as the\n- * {@code keyword} and {@code ip} fields. */\n- String format(BytesRef value);\n+ /** Format a binary value. This is used to convert the internal representation\n+ * that is used by doc values back to the original type of the object.\n+ * The return value must be supported by {@link XContentBuilder#value(Object)}. */\n+ Object format(BytesRef value);\n \n- /** Parse a value that was formatted with {@link #format(long)} back to the\n- * original long value. */\n- long parseLong(String value, boolean roundUp, LongSupplier now);\n+ /** Parse a value that comes from {@link XContentParser#objectBytes()} to the\n+ * internal representation that is used by doc values. */\n+ long parseLong(Object value, boolean roundUp, LongSupplier now);\n \n- /** Parse a value that was formatted with {@link #format(double)} back to\n- * the original double value. 
*/\n- double parseDouble(String value, boolean roundUp, LongSupplier now);\n+ /** Parse a value that comes from {@link XContentParser#objectBytes()} to the\n+ * internal representation that is used by doc values. */\n+ double parseDouble(Object value, boolean roundUp, LongSupplier now);\n \n- /** Parse a value that was formatted with {@link #format(BytesRef)} back\n- * to the original BytesRef. */\n- BytesRef parseBytesRef(String value);\n+ /** Parse a value that comes from {@link XContentParser#objectBytes()} to the\n+ * internal representation that is used by doc values. */\n+ BytesRef parseBytesRef(Object value);\n \n DocValueFormat RAW = new DocValueFormat() {\n \n@@ -85,13 +88,13 @@ public void writeTo(StreamOutput out) throws IOException {\n }\n \n @Override\n- public String format(long value) {\n- return Long.toString(value);\n+ public Long format(long value) {\n+ return value;\n }\n \n @Override\n- public String format(double value) {\n- return Double.toString(value);\n+ public Double format(double value) {\n+ return value;\n }\n \n @Override\n@@ -100,8 +103,8 @@ public String format(BytesRef value) {\n }\n \n @Override\n- public long parseLong(String value, boolean roundUp, LongSupplier now) {\n- double d = Double.parseDouble(value);\n+ public long parseLong(Object value, boolean roundUp, LongSupplier now) {\n+ double d = parseDouble(value, roundUp, now);\n if (roundUp) {\n d = Math.ceil(d);\n } else {\n@@ -111,13 +114,17 @@ public long parseLong(String value, boolean roundUp, LongSupplier now) {\n }\n \n @Override\n- public double parseDouble(String value, boolean roundUp, LongSupplier now) {\n- return Double.parseDouble(value);\n+ public double parseDouble(Object value, boolean roundUp, LongSupplier now) {\n+ if (value instanceof Number) {\n+ return ((Number) value).doubleValue();\n+ } else {\n+ return Double.parseDouble(value.toString());\n+ }\n }\n \n @Override\n- public BytesRef parseBytesRef(String value) {\n- return new BytesRef(value);\n+ public BytesRef parseBytesRef(Object value) {\n+ return new BytesRef(value.toString());\n }\n };\n \n@@ -166,17 +173,17 @@ public String format(BytesRef value) {\n }\n \n @Override\n- public long parseLong(String value, boolean roundUp, LongSupplier now) {\n- return parser.parse(value, now, roundUp, timeZone);\n+ public long parseLong(Object value, boolean roundUp, LongSupplier now) {\n+ return parser.parse(value.toString(), now, roundUp, timeZone);\n }\n \n @Override\n- public double parseDouble(String value, boolean roundUp, LongSupplier now) {\n+ public double parseDouble(Object value, boolean roundUp, LongSupplier now) {\n return parseLong(value, roundUp, now);\n }\n \n @Override\n- public BytesRef parseBytesRef(String value) {\n+ public BytesRef parseBytesRef(Object value) {\n throw new UnsupportedOperationException();\n }\n }\n@@ -193,12 +200,12 @@ public void writeTo(StreamOutput out) throws IOException {\n }\n \n @Override\n- public String format(long value) {\n+ public Object format(long value) {\n return GeoHashUtils.stringEncode(value);\n }\n \n @Override\n- public String format(double value) {\n+ public Object format(double value) {\n return format((long) value);\n }\n \n@@ -208,17 +215,17 @@ public String format(BytesRef value) {\n }\n \n @Override\n- public long parseLong(String value, boolean roundUp, LongSupplier now) {\n+ public long parseLong(Object value, boolean roundUp, LongSupplier now) {\n throw new UnsupportedOperationException();\n }\n \n @Override\n- public double parseDouble(String value, boolean roundUp, 
LongSupplier now) {\n+ public double parseDouble(Object value, boolean roundUp, LongSupplier now) {\n throw new UnsupportedOperationException();\n }\n \n @Override\n- public BytesRef parseBytesRef(String value) {\n+ public BytesRef parseBytesRef(Object value) {\n throw new UnsupportedOperationException();\n }\n };\n@@ -235,13 +242,13 @@ public void writeTo(StreamOutput out) throws IOException {\n }\n \n @Override\n- public String format(long value) {\n- return java.lang.Boolean.valueOf(value != 0).toString();\n+ public Boolean format(long value) {\n+ return java.lang.Boolean.valueOf(value != 0);\n }\n \n @Override\n- public String format(double value) {\n- return java.lang.Boolean.valueOf(value != 0).toString();\n+ public Boolean format(double value) {\n+ return java.lang.Boolean.valueOf(value != 0);\n }\n \n @Override\n@@ -250,8 +257,8 @@ public String format(BytesRef value) {\n }\n \n @Override\n- public long parseLong(String value, boolean roundUp, LongSupplier now) {\n- switch (value) {\n+ public long parseLong(Object value, boolean roundUp, LongSupplier now) {\n+ switch (value.toString()) {\n case \"false\":\n return 0;\n case \"true\":\n@@ -261,12 +268,12 @@ public long parseLong(String value, boolean roundUp, LongSupplier now) {\n }\n \n @Override\n- public double parseDouble(String value, boolean roundUp, LongSupplier now) {\n+ public double parseDouble(Object value, boolean roundUp, LongSupplier now) {\n return parseLong(value, roundUp, now);\n }\n \n @Override\n- public BytesRef parseBytesRef(String value) {\n+ public BytesRef parseBytesRef(Object value) {\n throw new UnsupportedOperationException();\n }\n };\n@@ -296,22 +303,24 @@ public String format(double value) {\n public String format(BytesRef value) {\n byte[] bytes = Arrays.copyOfRange(value.bytes, value.offset, value.offset + value.length);\n InetAddress inet = InetAddressPoint.decode(bytes);\n+ // We do not return the inet address directly since XContentBuilder does not know\n+ // how to deal with it\n return NetworkAddress.format(inet);\n }\n \n @Override\n- public long parseLong(String value, boolean roundUp, LongSupplier now) {\n+ public long parseLong(Object value, boolean roundUp, LongSupplier now) {\n throw new UnsupportedOperationException();\n }\n \n @Override\n- public double parseDouble(String value, boolean roundUp, LongSupplier now) {\n+ public double parseDouble(Object value, boolean roundUp, LongSupplier now) {\n throw new UnsupportedOperationException();\n }\n \n @Override\n- public BytesRef parseBytesRef(String value) {\n- return new BytesRef(InetAddressPoint.encode(InetAddresses.forString(value)));\n+ public BytesRef parseBytesRef(Object value) {\n+ return new BytesRef(InetAddressPoint.encode(InetAddresses.forString(value.toString())));\n }\n };\n \n@@ -358,10 +367,10 @@ public String format(BytesRef value) {\n }\n \n @Override\n- public long parseLong(String value, boolean roundUp, LongSupplier now) {\n+ public long parseLong(Object value, boolean roundUp, LongSupplier now) {\n Number n;\n try {\n- n = format.parse(value);\n+ n = format.parse(value.toString());\n } catch (ParseException e) {\n throw new RuntimeException(e);\n }\n@@ -379,19 +388,62 @@ public long parseLong(String value, boolean roundUp, LongSupplier now) {\n }\n \n @Override\n- public double parseDouble(String value, boolean roundUp, LongSupplier now) {\n+ public double parseDouble(Object value, boolean roundUp, LongSupplier now) {\n Number n;\n try {\n- n = format.parse(value);\n+ n = format.parse(value.toString());\n } catch 
(ParseException e) {\n throw new RuntimeException(e);\n }\n return n.doubleValue();\n }\n \n @Override\n- public BytesRef parseBytesRef(String value) {\n+ public BytesRef parseBytesRef(Object value) {\n throw new UnsupportedOperationException();\n }\n }\n+\n+ DocValueFormat BINARY = new DocValueFormat() {\n+\n+ @Override\n+ public String getWriteableName() {\n+ return \"binary\";\n+ }\n+\n+ @Override\n+ public void writeTo(StreamOutput out) throws IOException {\n+ }\n+\n+ @Override\n+ public Long format(long value) {\n+ throw new UnsupportedOperationException();\n+ }\n+\n+ @Override\n+ public Double format(double value) {\n+ throw new UnsupportedOperationException();\n+ }\n+\n+ @Override\n+ public String format(BytesRef value) {\n+ return Base64.getEncoder().encodeToString(\n+ Arrays.copyOfRange(value.bytes, value.offset, value.offset + value.length));\n+ }\n+\n+ @Override\n+ public long parseLong(Object value, boolean roundUp, LongSupplier now) {\n+ throw new UnsupportedOperationException();\n+ }\n+\n+ @Override\n+ public double parseDouble(Object value, boolean roundUp, LongSupplier now) {\n+ throw new UnsupportedOperationException();\n+ }\n+\n+ @Override\n+ public BytesRef parseBytesRef(Object value) {\n+ return new BytesRef(Base64.getDecoder().decode(value.toString()));\n+ }\n+ };\n }", "filename": "core/src/main/java/org/elasticsearch/search/DocValueFormat.java", "status": "modified" }, { "diff": "@@ -630,6 +630,7 @@ private void registerValueFormats() {\n registerValueFormat(DocValueFormat.GEOHASH.getWriteableName(), in -> DocValueFormat.GEOHASH);\n registerValueFormat(DocValueFormat.IP.getWriteableName(), in -> DocValueFormat.IP);\n registerValueFormat(DocValueFormat.RAW.getWriteableName(), in -> DocValueFormat.RAW);\n+ registerValueFormat(DocValueFormat.BINARY.getWriteableName(), in -> DocValueFormat.BINARY);\n }\n \n /**", "filename": "core/src/main/java/org/elasticsearch/search/SearchModule.java", "status": "modified" }, { "diff": "@@ -85,7 +85,7 @@ public void writeTo(StreamOutput out) throws IOException {\n \n @Override\n public String getKeyAsString() {\n- return format.format(key);\n+ return format.format(key).toString();\n }\n \n @Override\n@@ -116,7 +116,7 @@ Bucket reduce(List<Bucket> buckets, ReduceContext context) {\n \n @Override\n public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {\n- String keyAsString = format.format(key);\n+ String keyAsString = format.format(key).toString();\n if (keyed) {\n builder.startObject(keyAsString);\n } else {", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java", "status": "modified" }, { "diff": "@@ -81,7 +81,7 @@ public void writeTo(StreamOutput out) throws IOException {\n \n @Override\n public String getKeyAsString() {\n- return format.format(key);\n+ return format.format(key).toString();\n }\n \n @Override\n@@ -112,7 +112,7 @@ Bucket reduce(List<Bucket> buckets, ReduceContext context) {\n \n @Override\n public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {\n- String keyAsString = format.format(key);\n+ String keyAsString = format.format(key).toString();\n if (keyed) {\n builder.startObject(keyAsString);\n } else {", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java", "status": "modified" }, { "diff": "@@ -122,9 +122,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n if (keyed) {\n if (key == 
null) {\n StringBuilder keyBuilder = new StringBuilder();\n- keyBuilder.append(from == null ? \"*\" : format.format(from));\n+ keyBuilder.append(from == null ? \"*\" : format.format(from).toString());\n keyBuilder.append(\"-\");\n- keyBuilder.append(to == null ? \"*\" : format.format(to));\n+ keyBuilder.append(to == null ? \"*\" : format.format(to).toString());\n key = keyBuilder.toString();\n }\n builder.startObject(key);\n@@ -153,7 +153,7 @@ public Object getFrom() {\n \n @Override\n public String getFromAsString() {\n- return from == null ? null : format.format(from);\n+ return from == null ? null : format.format(from).toString();\n }\n \n @Override\n@@ -163,7 +163,7 @@ public Object getTo() {\n \n @Override\n public String getToAsString() {\n- return to == null ? null : format.format(to);\n+ return to == null ? null : format.format(to).toString();\n }\n \n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java", "status": "modified" }, { "diff": "@@ -100,7 +100,7 @@ public String getFromAsString() {\n if (Double.isInfinite(from)) {\n return null;\n } else {\n- return format.format(from);\n+ return format.format(from).toString();\n }\n }\n \n@@ -109,7 +109,7 @@ public String getToAsString() {\n if (Double.isInfinite(to)) {\n return null;\n } else {\n- return format.format(to);\n+ return format.format(to).toString();\n }\n }\n \n@@ -149,13 +149,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n if (!Double.isInfinite(from)) {\n builder.field(CommonFields.FROM.getPreferredName(), from);\n if (format != DocValueFormat.RAW) {\n- builder.field(CommonFields.FROM_AS_STRING.getPreferredName(), format.format(from));\n+ builder.field(CommonFields.FROM_AS_STRING.getPreferredName(), format.format(from).toString());\n }\n }\n if (!Double.isInfinite(to)) {\n builder.field(CommonFields.TO.getPreferredName(), to);\n if (format != DocValueFormat.RAW) {\n- builder.field(CommonFields.TO_AS_STRING.getPreferredName(), format.format(to));\n+ builder.field(CommonFields.TO_AS_STRING.getPreferredName(), format.format(to).toString());\n }\n }\n builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount);", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java", "status": "modified" }, { "diff": "@@ -82,7 +82,7 @@ int compareTerm(SignificantTerms.Bucket other) {\n \n @Override\n public String getKeyAsString() {\n- return format.format(term);\n+ return format.format(term).toString();\n }\n \n @Override\n@@ -100,7 +100,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n builder.startObject();\n builder.field(CommonFields.KEY.getPreferredName(), term);\n if (format != DocValueFormat.RAW) {\n- builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term));\n+ builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term).toString());\n }\n builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount());\n builder.field(\"score\", score);", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java", "status": "modified" }, { "diff": "@@ -87,7 +87,7 @@ int compareTerm(SignificantTerms.Bucket other) {\n \n @Override\n public String getKeyAsString() {\n- return format.format(termBytes);\n+ return format.format(termBytes).toString();\n }\n \n @Override", "filename": 
"core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java", "status": "modified" }, { "diff": "@@ -143,12 +143,12 @@ private long getBackgroundFrequency(String value) throws IOException {\n }\n \n public long getBackgroundFrequency(BytesRef termBytes) throws IOException {\n- String value = config.format().format(termBytes);\n+ String value = config.format().format(termBytes).toString();\n return getBackgroundFrequency(value);\n }\n \n public long getBackgroundFrequency(long termNum) throws IOException {\n- String value = config.format().format(termNum);\n+ String value = config.format().format(termNum).toString();\n return getBackgroundFrequency(value);\n }\n ", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java", "status": "modified" }, { "diff": "@@ -59,7 +59,7 @@ protected void writeTermTo(StreamOutput out) throws IOException {\n \n @Override\n public String getKeyAsString() {\n- return format.format(term);\n+ return format.format(term).toString();\n }\n \n @Override\n@@ -86,7 +86,7 @@ Bucket newBucket(long docCount, InternalAggregations aggs, long docCountError) {\n protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {\n builder.field(CommonFields.KEY.getPreferredName(), term);\n if (format != DocValueFormat.RAW) {\n- builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term));\n+ builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term).toString());\n }\n return builder;\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java", "status": "modified" }, { "diff": "@@ -59,7 +59,7 @@ protected void writeTermTo(StreamOutput out) throws IOException {\n \n @Override\n public String getKeyAsString() {\n- return format.format(term);\n+ return format.format(term).toString();\n }\n \n @Override\n@@ -86,7 +86,7 @@ Bucket newBucket(long docCount, InternalAggregations aggs, long docCountError) {\n protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {\n builder.field(CommonFields.KEY.getPreferredName(), term);\n if (format != DocValueFormat.RAW) {\n- builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term));\n+ builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term).toString());\n }\n return builder;\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java", "status": "modified" }, { "diff": "@@ -70,7 +70,7 @@ public Number getKeyAsNumber() {\n \n @Override\n public String getKeyAsString() {\n- return format.format(termBytes);\n+ return format.format(termBytes).toString();\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java", "status": "modified" }, { "diff": "@@ -47,7 +47,7 @@ protected SingleValue(StreamInput in) throws IOException {\n \n @Override\n public String getValueAsString() {\n- return format.format(value());\n+ return format.format(value()).toString();\n }\n \n @Override\n@@ -78,7 +78,7 @@ protected MultiValue(StreamInput in) throws IOException {\n public abstract double value(String name);\n \n public String valueAsString(String name) {\n- return format.format(value(name));\n+ return format.format(value(name)).toString();\n }\n \n @Override", "filename": 
"core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java", "status": "modified" }, { "diff": "@@ -89,7 +89,7 @@ public InternalAvg doReduce(List<InternalAggregation> aggregations, ReduceContex\n public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {\n builder.field(CommonFields.VALUE.getPreferredName(), count != 0 ? getValue() : null);\n if (count != 0 && format != DocValueFormat.RAW) {\n- builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(getValue()));\n+ builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(getValue()).toString());\n }\n return builder;\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java", "status": "modified" }, { "diff": "@@ -84,7 +84,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th\n boolean hasValue = !Double.isInfinite(max);\n builder.field(CommonFields.VALUE.getPreferredName(), hasValue ? max : null);\n if (hasValue && format != DocValueFormat.RAW) {\n- builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(max));\n+ builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(max).toString());\n }\n return builder;\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java", "status": "modified" }, { "diff": "@@ -84,7 +84,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th\n boolean hasValue = !Double.isInfinite(min);\n builder.field(CommonFields.VALUE.getPreferredName(), hasValue ? min : null);\n if (hasValue && format != DocValueFormat.RAW) {\n- builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(min));\n+ builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(min).toString());\n }\n return builder;\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java", "status": "modified" }, { "diff": "@@ -119,7 +119,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th\n double value = value(keys[i]);\n builder.field(key, value);\n if (format != DocValueFormat.RAW) {\n- builder.field(key + \"_as_string\", format.format(value));\n+ builder.field(key + \"_as_string\", format.format(value).toString());\n }\n }\n builder.endObject();\n@@ -131,7 +131,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th\n builder.field(CommonFields.KEY.getPreferredName(), keys[i]);\n builder.field(CommonFields.VALUE.getPreferredName(), value);\n if (format != DocValueFormat.RAW) {\n- builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value));\n+ builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value).toString());\n }\n builder.endObject();\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/AbstractInternalHDRPercentiles.java", "status": "modified" }, { "diff": "@@ -102,7 +102,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th\n double value = value(keys[i]);\n builder.field(key, value);\n if (format != DocValueFormat.RAW) {\n- builder.field(key + \"_as_string\", format.format(value));\n+ builder.field(key + \"_as_string\", format.format(value).toString());\n }\n }\n builder.endObject();\n@@ -114,7 +114,7 @@ public XContentBuilder doXContentBody(XContentBuilder 
builder, Params params) th\n builder.field(CommonFields.KEY.getPreferredName(), keys[i]);\n builder.field(CommonFields.VALUE.getPreferredName(), value);\n if (format != DocValueFormat.RAW) {\n- builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value));\n+ builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value).toString());\n }\n builder.endObject();\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/AbstractInternalTDigestPercentiles.java", "status": "modified" }, { "diff": "@@ -186,10 +186,10 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th\n builder.field(Fields.AVG, count != 0 ? getAvg() : null);\n builder.field(Fields.SUM, count != 0 ? sum : null);\n if (count != 0 && format != DocValueFormat.RAW) {\n- builder.field(Fields.MIN_AS_STRING, format.format(min));\n- builder.field(Fields.MAX_AS_STRING, format.format(max));\n- builder.field(Fields.AVG_AS_STRING, format.format(getAvg()));\n- builder.field(Fields.SUM_AS_STRING, format.format(sum));\n+ builder.field(Fields.MIN_AS_STRING, format.format(min).toString());\n+ builder.field(Fields.MAX_AS_STRING, format.format(max).toString());\n+ builder.field(Fields.AVG_AS_STRING, format.format(getAvg()).toString());\n+ builder.field(Fields.SUM_AS_STRING, format.format(sum).toString());\n }\n otherStatsToXCotent(builder, params);\n return builder;", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java", "status": "modified" }, { "diff": "@@ -174,8 +174,8 @@ protected XContentBuilder otherStatsToXCotent(XContentBuilder builder, Params pa\n .endObject();\n \n if (count != 0 && format != DocValueFormat.RAW) {\n- builder.field(Fields.SUM_OF_SQRS_AS_STRING, format.format(sumOfSqrs));\n- builder.field(Fields.VARIANCE_AS_STRING, format.format(getVariance()));\n+ builder.field(Fields.SUM_OF_SQRS_AS_STRING, format.format(sumOfSqrs).toString());\n+ builder.field(Fields.VARIANCE_AS_STRING, format.format(getVariance()).toString());\n builder.field(Fields.STD_DEVIATION_AS_STRING, getStdDeviationAsString());\n \n builder.startObject(Fields.STD_DEVIATION_BOUNDS_AS_STRING)", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java", "status": "modified" }, { "diff": "@@ -83,7 +83,7 @@ public InternalSum doReduce(List<InternalAggregation> aggregations, ReduceContex\n public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {\n builder.field(CommonFields.VALUE.getPreferredName(), sum);\n if (format != DocValueFormat.RAW) {\n- builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(sum));\n+ builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(sum).toString());\n }\n return builder;\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java", "status": "modified" }, { "diff": "@@ -19,6 +19,7 @@\n \n package org.elasticsearch.search.aggregations.metrics.tophits;\n \n+import org.elasticsearch.Version;\n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.ParsingException;\n import org.elasticsearch.common.Strings;\n@@ -37,6 +38,7 @@\n import org.elasticsearch.search.builder.SearchSourceBuilder;\n import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField;\n import org.elasticsearch.search.fetch.StoredFieldsContext;\n+import 
org.elasticsearch.search.fetch.subphase.DocValueFieldsContext;\n import org.elasticsearch.search.fetch.subphase.FetchSourceContext;\n import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext;\n import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;\n@@ -67,7 +69,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit\n private List<SortBuilder<?>> sorts = null;\n private HighlightBuilder highlightBuilder;\n private StoredFieldsContext storedFieldsContext;\n- private List<String> fieldDataFields;\n+ private List<DocValueFieldsContext.Field> fieldDataFields;\n private Set<ScriptField> scriptFields;\n private FetchSourceContext fetchSourceContext;\n \n@@ -82,12 +84,16 @@ public TopHitsAggregationBuilder(StreamInput in) throws IOException {\n super(in);\n explain = in.readBoolean();\n fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);\n- if (in.readBoolean()) {\n- int size = in.readVInt();\n- fieldDataFields = new ArrayList<>(size);\n- for (int i = 0; i < size; i++) {\n- fieldDataFields.add(in.readString());\n+ if (in.getVersion().before(Version.V_5_5_0_UNRELEASED)) {\n+ if (in.readBoolean()) {\n+ int size = in.readVInt();\n+ fieldDataFields = new ArrayList<>(size);\n+ for (int i = 0; i < size; i++) {\n+ fieldDataFields.add(new DocValueFieldsContext.Field(in.readString(), null));\n+ }\n }\n+ } else if (in.readBoolean()) {\n+ fieldDataFields = in.readList(DocValueFieldsContext.Field::new);\n }\n storedFieldsContext = in.readOptionalWriteable(StoredFieldsContext::new);\n from = in.readVInt();\n@@ -118,9 +124,13 @@ protected void doWriteTo(StreamOutput out) throws IOException {\n boolean hasFieldDataFields = fieldDataFields != null;\n out.writeBoolean(hasFieldDataFields);\n if (hasFieldDataFields) {\n- out.writeVInt(fieldDataFields.size());\n- for (String fieldName : fieldDataFields) {\n- out.writeString(fieldName);\n+ if (out.getVersion().before(Version.V_5_5_0_UNRELEASED)) {\n+ out.writeVInt(fieldDataFields.size());\n+ for (DocValueFieldsContext.Field field : fieldDataFields) {\n+ out.writeString(field.getName());\n+ }\n+ } else {\n+ out.writeList(fieldDataFields);\n }\n }\n out.writeOptionalWriteable(storedFieldsContext);\n@@ -380,21 +390,29 @@ public StoredFieldsContext storedFields() {\n * the search request.\n */\n public TopHitsAggregationBuilder fieldDataField(String fieldDataField) {\n+ return fieldDataField(fieldDataField, null);\n+ }\n+\n+ /**\n+ * Adds a field to load from the field data cache and return as part of\n+ * the search request.\n+ */\n+ public TopHitsAggregationBuilder fieldDataField(String fieldDataField, String format) {\n if (fieldDataField == null) {\n throw new IllegalArgumentException(\"[fieldDataField] must not be null: [\" + name + \"]\");\n }\n if (fieldDataFields == null) {\n fieldDataFields = new ArrayList<>();\n }\n- fieldDataFields.add(fieldDataField);\n+ fieldDataFields.add(new DocValueFieldsContext.Field(fieldDataField, format));\n return this;\n }\n \n /**\n * Adds fields to load from the field data cache and return as part of\n * the search request.\n */\n- public TopHitsAggregationBuilder fieldDataFields(List<String> fieldDataFields) {\n+ public TopHitsAggregationBuilder fieldDataFields(List<DocValueFieldsContext.Field> fieldDataFields) {\n if (fieldDataFields == null) {\n throw new IllegalArgumentException(\"[fieldDataFields] must not be null: [\" + name + \"]\");\n }\n@@ -408,7 +426,7 @@ public TopHitsAggregationBuilder fieldDataFields(List<String> fieldDataFields) {\n 
/**\n * Gets the field-data fields.\n */\n- public List<String> fieldDataFields() {\n+ public List<DocValueFieldsContext.Field> fieldDataFields() {\n return fieldDataFields;\n }\n \n@@ -564,9 +582,9 @@ protected XContentBuilder internalXContent(XContentBuilder builder, Params param\n storedFieldsContext.toXContent(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), builder);\n }\n if (fieldDataFields != null) {\n- builder.startArray(SearchSourceBuilder.DOCVALUE_FIELDS_FIELD.getPreferredName());\n- for (String fieldDataField : fieldDataFields) {\n- builder.value(fieldDataField);\n+ builder.startArray(DocValueFieldsContext.DOCVALUE_FIELDS_FIELD.getPreferredName());\n+ for (DocValueFieldsContext.Field fieldDataField : fieldDataFields) {\n+ fieldDataField.toXContent(builder);\n }\n builder.endArray();\n }\n@@ -682,15 +700,10 @@ public static TopHitsAggregationBuilder parse(String aggregationName, QueryParse\n if (SearchSourceBuilder.STORED_FIELDS_FIELD.match(currentFieldName)) {\n factory.storedFieldsContext =\n StoredFieldsContext.fromXContent(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), context);\n- } else if (SearchSourceBuilder.DOCVALUE_FIELDS_FIELD.match(currentFieldName)) {\n- List<String> fieldDataFields = new ArrayList<>();\n+ } else if (DocValueFieldsContext.DOCVALUE_FIELDS_FIELD.match(currentFieldName)) {\n+ List<DocValueFieldsContext.Field> fieldDataFields = new ArrayList<>();\n while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {\n- if (token == XContentParser.Token.VALUE_STRING) {\n- fieldDataFields.add(parser.text());\n- } else {\n- throw new ParsingException(parser.getTokenLocation(), \"Expected [\" + XContentParser.Token.VALUE_STRING\n- + \"] in [\" + currentFieldName + \"] but found [\" + token + \"]\", parser.getTokenLocation());\n- }\n+ fieldDataFields.add(DocValueFieldsContext.Field.fromXContent(parser, context));\n }\n factory.fieldDataFields(fieldDataFields);\n } else if (SearchSourceBuilder.SORT_FIELD.match(currentFieldName)) {", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -47,14 +47,15 @@ public class TopHitsAggregatorFactory extends AggregatorFactory<TopHitsAggregato\n private final Optional<SortAndFormats> sort;\n private final HighlightBuilder highlightBuilder;\n private final StoredFieldsContext storedFieldsContext;\n- private final List<String> docValueFields;\n+ private final List<DocValueFieldsContext.Field> docValueFields;\n private final List<ScriptFieldsContext.ScriptField> scriptFields;\n private final FetchSourceContext fetchSourceContext;\n \n public TopHitsAggregatorFactory(String name, int from, int size, boolean explain, boolean version, boolean trackScores,\n Optional<SortAndFormats> sort, HighlightBuilder highlightBuilder, StoredFieldsContext storedFieldsContext,\n- List<String> docValueFields, List<ScriptFieldsContext.ScriptField> scriptFields, FetchSourceContext fetchSourceContext,\n- SearchContext context, AggregatorFactory<?> parent, AggregatorFactories.Builder subFactories, Map<String, Object> metaData)\n+ List<DocValueFieldsContext.Field> docValueFields, List<ScriptFieldsContext.ScriptField> scriptFields,\n+ FetchSourceContext fetchSourceContext, SearchContext context, AggregatorFactory<?> parent,\n+ AggregatorFactories.Builder subFactories, Map<String, Object> metaData)\n throws IOException {\n super(name, context, parent, subFactories, metaData);\n this.from = from;", "filename": 
"core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java", "status": "modified" }, { "diff": "@@ -81,7 +81,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th\n boolean hasValue = !(Double.isInfinite(value) || Double.isNaN(value));\n builder.field(CommonFields.VALUE.getPreferredName(), hasValue ? value : null);\n if (hasValue && format != DocValueFormat.RAW) {\n- builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value));\n+ builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value).toString());\n }\n return builder;\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalSimpleValue.java", "status": "modified" }, { "diff": "@@ -100,7 +100,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th\n boolean hasValue = !Double.isInfinite(value);\n builder.field(CommonFields.VALUE.getPreferredName(), hasValue ? value : null);\n if (hasValue && format != DocValueFormat.RAW) {\n- builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value));\n+ builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value).toString());\n }\n builder.startArray(\"keys\");\n for (String key : keys) {", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java", "status": "modified" }, { "diff": "@@ -82,7 +82,7 @@ public double percentile(double percent) throws IllegalArgumentException {\n \n @Override\n public String percentileAsString(double percent) {\n- return format.format(percentile(percent));\n+ return format.format(percentile(percent)).toString();\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucket.java", "status": "modified" } ] }
{ "body": "Originated from https://github.com/elastic/kibana/issues/9568\r\n\r\nFor `scaled_float` type the expectation is that aggregations works on the scaled float value. For instance for the value \"6.98\" with a scaling factor of 10 the bucket \"6.9\" should be used.\r\nCurrently the aggregations use the long value of the scaled float to compute the buckets. So instead of \"6.9\" the created bucket is \"6\". \r\n\r\nSteps to reproduce:\r\n\r\n````\r\nPUT t \r\n{\r\n \"mappings\": {\r\n \"t\": {\r\n \"properties\": {\r\n \"ctMultiplier\": {\r\n \"scaling_factor\": 10,\r\n \"type\": \"scaled_float\",\r\n \"store\": true\r\n }\r\n }\r\n }\r\n }\r\n}\r\n\r\nPOST t/t/1\r\n{\r\n \"ctMultiplier\": \"6.8\"\r\n}\r\n\r\nPOST t/t/2\r\n{\r\n \"ctMultiplier\": \"6.9\"\r\n}\r\n\r\n\r\nGET _search\r\n{\r\n \"size\": 0,\r\n \"aggs\": {\r\n \"t\": {\r\n \"terms\": {\r\n \"field\": \"ctMultiplier\"\r\n }\r\n }\r\n }\r\n}\r\n`````\r\n", "comments": [], "number": 22350, "title": "`scaled_float` are used as longs in aggregations" }
{ "body": "`scaled_float` should be used as FLOAT in aggregations but currently they are used as LONG.\r\nThis change fixes this issue and adds a simple it test for it.\r\n\r\nFixes #22350", "number": 22351, "review_comments": [ { "body": "I don't think that will change anything in practice, but I think the correct value would be `DOUBLE`?", "created_at": "2016-12-26T17:40:22Z" } ], "title": "Fix scaled_float numeric type in aggregations" }
{ "commits": [ { "message": "Fix scaled_float numeric type in aggregations\n\n`scaled_float` should be used as FLOAT in aggregations but currently they are used as LONG.\nThis change fixes this issue and adds a simple it test for it." }, { "message": "cosmetic" }, { "message": "ScaledFloat fielddata type should be DOUBLE and not FLOAT" } ], "files": [ { "diff": "@@ -504,7 +504,10 @@ public Index index() {\n \n @Override\n public NumericType getNumericType() {\n- return scaledFieldData.getNumericType();\n+ /**\n+ * {@link ScaledFloatLeafFieldData#getDoubleValues()} transforms the raw long values in `scaled` floats.\n+ */\n+ return NumericType.DOUBLE;\n }\n \n }", "filename": "core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java", "status": "modified" }, { "diff": "@@ -182,6 +182,7 @@ public void testFieldData() throws IOException {\n // single-valued\n ft.setName(\"scaled_float1\");\n IndexNumericFieldData fielddata = (IndexNumericFieldData) ft.fielddataBuilder().build(indexSettings, ft, null, null, null);\n+ assertEquals(fielddata.getNumericType(), IndexNumericFieldData.NumericType.DOUBLE);\n AtomicNumericFieldData leafFieldData = fielddata.load(reader.leaves().get(0));\n SortedNumericDoubleValues values = leafFieldData.getDoubleValues();\n values.setDocument(0);", "filename": "core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java", "status": "modified" }, { "diff": "@@ -18,6 +18,9 @@ setup:\n type: long\n double:\n type: double\n+ scaled_float:\n+ type: scaled_float\n+ scaling_factor: 100\n date:\n type: date\n \n@@ -282,6 +285,52 @@ setup:\n \n - match: { aggregations.double_terms.buckets.1.doc_count: 1 }\n \n+---\n+\"Scaled float test\":\n+ - do:\n+ index:\n+ index: test_1\n+ type: test\n+ id: 1\n+ body: { \"scaled_float\": 9.99 }\n+\n+ - do:\n+ index:\n+ index: test_1\n+ type: test\n+ id: 2\n+ body: { \"scaled_float\": 9.994 }\n+\n+ - do:\n+ index:\n+ index: test_1\n+ type: test\n+ id: 3\n+ body: { \"scaled_float\": 8.99 }\n+\n+ - do:\n+ indices.refresh: {}\n+\n+ - do:\n+ search:\n+ body: { \"size\" : 0, \"aggs\" : { \"scaled_float_terms\" : { \"terms\" : { \"field\" : \"scaled_float\" } } } }\n+\n+ - match: { hits.total: 3 }\n+\n+ - length: { aggregations.scaled_float_terms.buckets: 2 }\n+\n+ - match: { aggregations.scaled_float_terms.buckets.0.key: 9.99 }\n+\n+ - is_false: aggregations.scaled_float_terms.buckets.0.key_as_string\n+\n+ - match: { aggregations.scaled_float_terms.buckets.0.doc_count: 2 }\n+\n+ - match: { aggregations.scaled_float_terms.buckets.1.key: 8.99 }\n+\n+ - is_false: aggregations.scaled_float_terms.buckets.1.key_as_string\n+\n+ - match: { aggregations.scaled_float_terms.buckets.1.doc_count: 1 }\n+\n ---\n \"Date test\":\n - do:", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml", "status": "modified" } ] }
{ "body": "We have a serialization bug somewhere in the stats serialization code. I've now seen ~~five~~ six independent reports ([2](https://github.com/elastic/elasticsearch/pull/21478#issuecomment-264607855), [4](https://discuss.elastic.co/t/random-exceptions-on-transport-layer-and-subsequent-node-disconnections/68704), [5](https://discuss.elastic.co/t/remote-transport-exception-in-5-1-1-w-jdk-1-8-0-111/69607) and ~~two~~ three more that are not linkable) of:\r\n\r\n```\r\n[2016-12-12T09:26:50,081][WARN ][o.e.t.n.Netty4Transport ] [...] exception caught on transport layer [[id: 0xcbdaf621, L:/...:35678 - R:.../...:9300]], closing connection\r\njava.lang.IllegalStateException: Message not fully read (response) for requestId [...], handler [org.elasticsearch.transport.TransportService$ContextRestoreResponseHandler/org.elasticsearch.action.support.nodes.TransportNodesAction$AsyncAction$1@44aa70c], error [false]; resetting\r\n at org.elasticsearch.transport.TcpTransport.messageReceived(TcpTransport.java:1257) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n```\r\n\r\nand the related\r\n\r\n```\r\nCaused by: java.io.EOFException: tried to read: 91755306 bytes but only 114054 remaining\r\n```\r\n\r\nand\r\n\r\n```\r\nCaused by: java.lang.IllegalStateException: No routing state mapped for [103]\r\n at org.elasticsearch.cluster.routing.ShardRoutingState.fromValue(ShardRoutingState.java:71) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n```\r\n\r\nIt seems to always be in some stats response, either a node stats response, or a cluster stats response and it's coming from `TransportBroadcastByNodeAction` and the single action defined by a lambda in `TransportNodesAction$AsyncAction`. We are blowing reading the stream somewhere and then reading garbage subsequently.\r\n\r\nWhatever it is, it's pesky. So far, there is not a reliable reproduction and finding the bug is tricky since these responses serialize the entire world.\r\n\r\nThe first instance of this led to #21478 so that we know the handler name, #22152 so we can detect corruption earlier, and #22223 to clean up some serialization code. Right now, I do not think we've squashed the issue.\r\n", "comments": [ { "body": "This is possibly related to having indices from 2.x in the cluster.", "created_at": "2016-12-21T17:38:04Z" }, { "body": "> This is possibly related to having indices from 2.x in the cluster.\r\n\r\nThis was a red herring, it was far more sinister.", "created_at": "2016-12-21T22:10:50Z" }, { "body": "So what was the solution? We are seeing a simliar error message that is removing our data nodes from our cluster:\r\n\r\n```\r\n[WARN ][o.e.t.n.Netty4Transport ] [...] 
exception caught on transport layer [[id: 0x2ece9a4b, L:/..:49172 - R:../...:9300]], closing connection\r\njava.lang.IllegalStateException: Message not fully read (response) for requestId [2336886], handler [org.elasticsearch.transport.TransportService$ContextRestoreResponseHandler/org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$AsyncAction$1@5d3a1f72], error [false]; resetting\r\n at org.elasticsearch.transport.TcpTransport.messageReceived(TcpTransport.java:1257) ~[elasticsearch-5.1.1.jar:5.1.1]\r\n at org.elasticsearch.transport.netty4.Netty4MessageChannelHandler.channelRead(Netty4MessageChannelHandler.java:74) ~[transport-netty4-5.1.1.jar:5.1.1]\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:373) [netty-transport-4.1.6.Final.jar:4.1.6.Final]\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:359) [netty-transport-4.1.6.Final.jar:4.1.6.Final]\r\n at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:351) [netty-transport-4.1.6.Final.jar:4.1.6.Final]\r\n at io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:293) [netty-codec-4.1.6.Final.jar:4.1.6.Final]\r\n at io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:280) [netty-codec-4.1.6.Final.jar:4.1.6.Final]\r\n at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:396) [netty-codec-4.1.6.Final.jar:4.1.6.Final]\r\n at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:248) [netty-codec-4.1.6.Final.jar:4.1.6.Final]\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:373) [netty-transport-4.1.6.Final.jar:4.1.6.Final]\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:359) [netty-transport-4.1.6.Final.jar:4.1.6.Final]\r\n at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:351) [netty-transport-4.1.6.Final.jar:4.1.6.Final]\r\n at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334) [netty-transport-4.1.6.Final.jar:4.1.6.Final]\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:373) [netty-transport-4.1.6.Final.jar:4.1.6.Final]\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:359) [netty-transport-4.1.6.Final.jar:4.1.6.Final]\r\n at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926) [netty-transport-4.1.6.Final.jar:4.1.6.Final]\r\n at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:129) [netty-transport-4.1.6.Final.jar:4.1.6.Final]\r\n at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:651) [netty-transport-4.1.6.Final.jar:4.1.6.Final]\r\n at io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:536) [netty-transport-4.1.6.Final.jar:4.1.6.Final]\r\n at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:490) [netty-transport-4.1.6.Final.jar:4.1.6.Final]\r\n at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:450) [netty-transport-4.1.6.Final.jar:4.1.6.Final]\r\n at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:873) 
[netty-common-4.1.6.Final.jar:4.1.6.Final]\r\n at java.lang.Thread.run(Thread.java:745) [?:1.8.0_111-internal]\r\n```", "created_at": "2017-01-31T22:24:32Z" }, { "body": "Upgrade to [5.1.2](https://www.elastic.co/downloads/past-releases/elasticsearch-5-1-2) or [5.2.0](https://www.elastic.co/downloads/past-releases/elasticsearch-5-2-0) which both have the fix.", "created_at": "2017-01-31T22:45:54Z" }, { "body": "Using 5.2.1 it seems I'm still having this issue:\r\n\r\n```\r\nFeb 22 10:14:43 pc elasticsearch[10964]: [2017-02-22T10:14:43,374][INFO ][o.e.n.Node ] [elasticsearch-data] initializing ...\r\nFeb 22 10:14:43 pc elasticsearch[10964]: [2017-02-22T10:14:43,508][INFO ][o.e.e.NodeEnvironment ] [elasticsearch-data] using [1] data paths, mounts [[/usr/share/elasticsearch/data (datapool/app/data)]], net usable_space [13.5gb], net total_space [14gb], spins? [possibly], types [zfs]\r\nFeb 22 10:14:43 pc elasticsearch[10964]: [2017-02-22T10:14:43,508][INFO ][o.e.e.NodeEnvironment ] [elasticsearch-data] heap size [1.9gb], compressed ordinary object pointers [true]\r\nFeb 22 10:14:43 pc elasticsearch[10964]: [2017-02-22T10:14:43,587][INFO ][o.e.n.Node ] [elasticsearch-data] node name [elasticsearch-data], node ID [yXgT_M87STebU13T98U4Dw]\r\nFeb 22 10:14:43 pc elasticsearch[10964]: [2017-02-22T10:14:43,591][INFO ][o.e.n.Node ] [elasticsearch-data] version[5.2.1], pid[1], build[db0d481/2017-02-09T22:05:32.386Z], OS[Linux/4.4.0-21-generic/amd64], JVM[Oracle Corporation/OpenJDK 64-Bit Server VM/1.8.0_121/25.121-b13]\r\nFeb 22 10:14:47 pc elasticsearch[10964]: [2017-02-22T10:14:47,413][INFO ][o.e.p.PluginsService ] [elasticsearch-data] loaded module [aggs-matrix-stats]\r\nFeb 22 10:14:47 pc elasticsearch[10964]: [2017-02-22T10:14:47,414][INFO ][o.e.p.PluginsService ] [elasticsearch-data] loaded module [ingest-common]\r\nFeb 22 10:14:47 pc elasticsearch[10964]: [2017-02-22T10:14:47,414][INFO ][o.e.p.PluginsService ] [elasticsearch-data] loaded module [lang-expression]\r\nFeb 22 10:14:47 pc elasticsearch[10964]: [2017-02-22T10:14:47,414][INFO ][o.e.p.PluginsService ] [elasticsearch-data] loaded module [lang-groovy]\r\nFeb 22 10:14:47 pc elasticsearch[10964]: [2017-02-22T10:14:47,414][INFO ][o.e.p.PluginsService ] [elasticsearch-data] loaded module [lang-mustache]\r\nFeb 22 10:14:47 pc elasticsearch[10964]: [2017-02-22T10:14:47,414][INFO ][o.e.p.PluginsService ] [elasticsearch-data] loaded module [lang-painless]\r\nFeb 22 10:14:47 pc elasticsearch[10964]: [2017-02-22T10:14:47,414][INFO ][o.e.p.PluginsService ] [elasticsearch-data] loaded module [percolator]\r\nFeb 22 10:14:47 pc elasticsearch[10964]: [2017-02-22T10:14:47,415][INFO ][o.e.p.PluginsService ] [elasticsearch-data] loaded module [reindex]\r\nFeb 22 10:14:47 pc elasticsearch[10964]: [2017-02-22T10:14:47,415][INFO ][o.e.p.PluginsService ] [elasticsearch-data] loaded module [transport-netty3]\r\nFeb 22 10:14:47 pc elasticsearch[10964]: [2017-02-22T10:14:47,415][INFO ][o.e.p.PluginsService ] [elasticsearch-data] loaded module [transport-netty4]\r\nFeb 22 10:14:47 pc elasticsearch[10964]: [2017-02-22T10:14:47,418][INFO ][o.e.p.PluginsService ] [elasticsearch-data] no plugins loaded\r\nFeb 22 10:14:47 pc elasticsearch[10964]: [2017-02-22T10:14:47,958][WARN ][o.e.d.s.g.GroovyScriptEngineService] [groovy] scripts are deprecated, use [painless] scripts instead\r\nFeb 22 10:14:52 pc elasticsearch[10964]: [2017-02-22T10:14:52,711][INFO ][o.e.n.Node ] [elasticsearch-data] initialized\r\nFeb 22 10:14:52 pc elasticsearch[10964]: 
[2017-02-22T10:14:52,713][INFO ][o.e.n.Node ] [elasticsearch-data] starting ...\r\nFeb 22 10:14:52 pc elasticsearch[10964]: [2017-02-22T10:14:52,901][WARN ][i.n.u.i.MacAddressUtil ] Failed to find a usable hardware address from the network interfaces; using random bytes: f3:36:48:59:c3:a6:a0:f5\r\nFeb 22 10:14:53 pc elasticsearch[10964]: [2017-02-22T10:14:53,267][INFO ][o.e.t.TransportService ] [elasticsearch-data] publish_address {10.0.0.3:9300}, bound_addresses {10.0.0.3:9300}, {172.18.0.4:9300}\r\nFeb 22 10:14:53 pc elasticsearch[10964]: [2017-02-22T10:14:53,284][INFO ][o.e.b.BootstrapChecks ] [elasticsearch-data] bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks\r\nFeb 22 10:14:56 pc elasticsearch[10964]: [2017-02-22T10:14:56,906][INFO ][o.e.c.s.ClusterService ] [elasticsearch] added {{elasticsearch-data}{yXgT_M87STebU13T98U4Dw}{iVrGLxOWQAmAEmBSvW4WJQ}{10.0.0.3}{10.0.0.3:9300},}, reason: zen-disco-node-join[{elasticsearch-data}{yXgT_M87STebU13T98U4Dw}{iVrGLxOWQAmAEmBSvW4WJQ}{10.0.0.3}{10.0.0.3:9300}]\r\nFeb 22 10:14:57 pc elasticsearch[10964]: [2017-02-22T10:14:57,327][INFO ][o.e.c.s.ClusterService ] [elasticsearch-data] detected_master {elasticsearch}{pFof7ItCR3Weurx60K77Dw}{pciKrHiiTxuxXsXoy28loQ}{10.0.0.2}{10.0.0.2:9300}, added {{elasticsearch}{pFof7ItCR3Weurx60K77Dw}{pciKrHiiTxuxXsXoy28loQ}{10.0.0.2}{10.0.0.2:9300},}, reason: zen-disco-receive(from master [master {elasticsearch}{pFof7ItCR3Weurx60K77Dw}{pciKrHiiTxuxXsXoy28loQ}{10.0.0.2}{10.0.0.2:9300} committed version [160]])\r\nFeb 22 10:14:58 pc elasticsearch[10964]: [2017-02-22T10:14:58,089][INFO ][o.e.h.HttpServer ] [elasticsearch-data] publish_address {10.0.0.3:9200}, bound_addresses {10.0.0.3:9200}, {172.18.0.4:9200}\r\nFeb 22 10:14:58 pc elasticsearch[10964]: [2017-02-22T10:14:58,089][INFO ][o.e.n.Node ] [elasticsearch-data] started\r\nFeb 22 10:14:59 pc elasticsearch[10964]: [2017-02-22T10:14:59,194][DEBUG][o.e.a.a.i.s.TransportIndicesStatsAction] [elasticsearch] failed to execute [indices:monitor/stats] on node [yXgT_M87STebU13T98U4Dw]\r\nFeb 22 10:14:59 pc elasticsearch[10964]: org.elasticsearch.transport.NodeDisconnectedException: [elasticsearch-data][10.0.0.3:9300][indices:monitor/stats[n]] disconnected\r\nFeb 22 10:14:59 pc elasticsearch[10964]: [2017-02-22T10:14:59,199][DEBUG][o.e.a.a.c.n.s.TransportNodesStatsAction] [elasticsearch] failed to execute on node [yXgT_M87STebU13T98U4Dw]\r\nFeb 22 10:14:59 pc elasticsearch[10964]: org.elasticsearch.transport.NodeDisconnectedException: [elasticsearch-data][10.0.0.3:9300][cluster:monitor/nodes/stats[n]] disconnected\r\nFeb 22 10:14:59 pc elasticsearch[10964]: [2017-02-22T10:14:59,201][INFO ][o.e.c.s.ClusterService ] [elasticsearch] removed {{elasticsearch-data}{yXgT_M87STebU13T98U4Dw}{iVrGLxOWQAmAEmBSvW4WJQ}{10.0.0.3}{10.0.0.3:9300},}, reason: zen-disco-node-failed({elasticsearch-data}{yXgT_M87STebU13T98U4Dw}{iVrGLxOWQAmAEmBSvW4WJQ}{10.0.0.3}{10.0.0.3:9300}), reason(transport disconnected)[{elasticsearch-data}{yXgT_M87STebU13T98U4Dw}{iVrGLxOWQAmAEmBSvW4WJQ}{10.0.0.3}{10.0.0.3:9300} transport disconnected]\r\nFeb 22 10:14:59 pc elasticsearch[10964]: [2017-02-22T10:14:59,200][WARN ][o.e.a.a.c.n.s.TransportNodesStatsAction] [elasticsearch] not accumulating exceptions, excluding exception from response\r\nFeb 22 10:14:59 pc elasticsearch[10964]: org.elasticsearch.action.FailedNodeException: Failed node [yXgT_M87STebU13T98U4Dw]\r\nFeb 22 10:14:59 pc elasticsearch[10964]: #011at 
org.elasticsearch.action.support.nodes.TransportNodesAction$AsyncAction.onFailure(TransportNodesAction.java:247) ~[elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:14:59 pc elasticsearch[10964]: #011at org.elasticsearch.action.support.nodes.TransportNodesAction$AsyncAction.access$300(TransportNodesAction.java:160) ~[elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:14:59 pc elasticsearch[10964]: #011at org.elasticsearch.action.support.nodes.TransportNodesAction$AsyncAction$1.handleException(TransportNodesAction.java:219) ~[elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:14:59 pc elasticsearch[10964]: #011at org.elasticsearch.transport.TransportService$ContextRestoreResponseHandler.handleException(TransportService.java:1024) ~[elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:14:59 pc elasticsearch[10964]: #011at org.elasticsearch.transport.TransportService$Adapter.lambda$onNodeDisconnected$6(TransportService.java:851) ~[elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:14:59 pc elasticsearch[10964]: #011at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:527) [elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:14:59 pc elasticsearch[10964]: #011at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_121]\r\nFeb 22 10:14:59 pc elasticsearch[10964]: #011at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_121]\r\nFeb 22 10:14:59 pc elasticsearch[10964]: #011at java.lang.Thread.run(Thread.java:745) [?:1.8.0_121]\r\nFeb 22 10:14:59 pc elasticsearch[10964]: Caused by: org.elasticsearch.transport.NodeDisconnectedException: [elasticsearch-data][10.0.0.3:9300][cluster:monitor/nodes/stats[n]] disconnected\r\nFeb 22 10:14:59 pc elasticsearch[10964]: [2017-02-22T10:14:59,202][INFO ][o.e.c.r.DelayedAllocationService] [elasticsearch] scheduling reroute for delayed shards in [59.9s] (4 delayed shards)\r\n```\r\n\r\nIs there anything I can do?", "created_at": "2017-02-22T09:19:44Z" }, { "body": "@mostolog are you running an older version of ES in your cluster anywhere?", "created_at": "2017-02-22T09:38:33Z" }, { "body": "I would say not, but after so many tests I don't know what I tried and what I didn't.\r\nI don't know it could be related, but cerebro was running each time it happened. (I'm trying to get a replication use case)", "created_at": "2017-02-22T09:42:52Z" }, { "body": "@mostolog this request will tell you what versions are running:\r\n\r\n curl -XGET \"http://localhost:9200/_nodes?pretty&filter_path=nodes.*.version\"\r\n\r\n(btw, cerebro is the kopf rewrite https://github.com/lmenezes/cerebro so stats related)", "created_at": "2017-02-22T09:46:27Z" }, { "body": "```\r\n{\r\n \"nodes\" : {\r\n \"G2cl2gwLTQaQUY4Oca5bHg\" : {\r\n \"version\" : \"5.2.1\"\r\n },\r\n \"9L57Rh05QvqXvC9LqsUvSg\" : {\r\n \"version\" : \"5.2.1\"\r\n }\r\n }\r\n}\r\n```", "created_at": "2017-02-22T09:48:27Z" }, { "body": "@mostolog how can you tell that this is related to stats serialization? 
I can't see a stacktrace in your case", "created_at": "2017-02-22T09:51:42Z" }, { "body": "Just reproduced a similar error without cerebro or any references to stats, so'll probably have to look elsewhere:\r\n```\r\nFeb 22 10:49:27 pc elasticsearch[10964]: [2017-02-22T10:49:27,708][INFO ][o.e.c.m.MetaDataCreateIndexService] [elasticsearch] [group:app1@2017-02-22-10] creating index, cause [auto(bulk api)], templates [], shards [5]/[1], mappings []\r\nFeb 22 10:49:31 pc elasticsearch[10964]: [2017-02-22T10:49:30,984][WARN ][o.e.m.j.JvmGcMonitorService] [elasticsearch] [gc][young][356][2] duration [2.7s], collections [1]/[3.1s], total [2.7s]/[2.9s], memory [568.9mb]->[298.8mb]/[1.9gb], all_pools {[young] [521.6mb]->[7.4mb]/[532.5mb]}{[survivor] [47.2mb]->[66.4mb]/[66.5mb]}{[old] [0b]->[228.3mb]/[1.3gb]}\r\nFeb 22 10:49:31 pc elasticsearch[10964]: [2017-02-22T10:49:31,021][WARN ][o.e.m.j.JvmGcMonitorService] [elasticsearch] [gc][356] overhead, spent [2.7s] collecting in the last [3.1s]\r\nFeb 22 10:49:31 pc elasticsearch[10964]: [2017-02-22T10:49:31,079][INFO ][o.e.c.m.MetaDataMappingService] [elasticsearch] [group:app1@2017-02-22-10/NpYVbZxHQuG0far9dfcHsQ] create_mapping [logs]\r\nFeb 22 10:49:31 pc elasticsearch[10964]: [2017-02-22T10:49:31,102][INFO ][o.e.c.m.MetaDataMappingService] [elasticsearch] [group:app1@2017-02-22-10/NpYVbZxHQuG0far9dfcHsQ] update_mapping [logs]\r\nFeb 22 10:49:31 pc elasticsearch[10964]: [2017-02-22T10:49:31,152][INFO ][o.e.c.m.MetaDataMappingService] [elasticsearch] [group:app1@2017-02-22-10/NpYVbZxHQuG0far9dfcHsQ] update_mapping [logs]\r\nFeb 22 10:49:31 pc elasticsearch[10964]: [2017-02-22T10:49:31,755][INFO ][o.e.c.m.MetaDataMappingService] [elasticsearch] [group:app1@2017-02-22-10/NpYVbZxHQuG0far9dfcHsQ] update_mapping [logs]\r\nFeb 22 10:49:32 pc elasticsearch[10964]: [2017-02-22T10:49:32,381][DEBUG][o.e.a.a.i.m.p.TransportPutMappingAction] [elasticsearch-data] connection exception while trying to forward request with action name [indices:admin/mapping/put] to master node [{elasticsearch}{G2cl2gwLTQaQUY4Oca5bHg}{5GA7XQjcTKWJuGAb1wwILg}{10.0.0.2}{10.0.0.2:9300}], scheduling a retry. Error: [org.elasticsearch.transport.NodeDisconnectedException: [elasticsearch][10.0.0.2:9300][indices:admin/mapping/put] disconnected]\r\nFeb 22 10:49:32 pc elasticsearch[10964]: [2017-02-22T10:49:32,381][DEBUG][o.e.a.a.i.m.p.TransportPutMappingAction] [elasticsearch-data] connection exception while trying to forward request with action name [indices:admin/mapping/put] to master node [{elasticsearch}{G2cl2gwLTQaQUY4Oca5bHg}{5GA7XQjcTKWJuGAb1wwILg}{10.0.0.2}{10.0.0.2:9300}], scheduling a retry. Error: [org.elasticsearch.transport.NodeDisconnectedException: [elasticsearch][10.0.0.2:9300][indices:admin/mapping/put] disconnected]\r\nFeb 22 10:49:32 pc elasticsearch[10964]: [2017-02-22T10:49:32,432][DEBUG][o.e.a.a.i.m.p.TransportPutMappingAction] [elasticsearch-data] connection exception while trying to forward request with action name [indices:admin/mapping/put] to master node [{elasticsearch}{G2cl2gwLTQaQUY4Oca5bHg}{5GA7XQjcTKWJuGAb1wwILg}{10.0.0.2}{10.0.0.2:9300}], scheduling a retry. 
Error: [org.elasticsearch.transport.NodeNotConnectedException: [elasticsearch][10.0.0.2:9300] Node not connected]\r\nFeb 22 10:49:32 pc elasticsearch[10964]: [2017-02-22T10:49:32,432][DEBUG][o.e.a.a.i.m.p.TransportPutMappingAction] [elasticsearch-data] connection exception while trying to forward request with action name [indices:admin/mapping/put] to master node [{elasticsearch}{G2cl2gwLTQaQUY4Oca5bHg}{5GA7XQjcTKWJuGAb1wwILg}{10.0.0.2}{10.0.0.2:9300}], scheduling a retry. Error: [org.elasticsearch.transport.NodeNotConnectedException: [elasticsearch][10.0.0.2:9300] Node not connected]\r\nFeb 22 10:49:32 pc elasticsearch[10964]: [2017-02-22T10:49:32,435][DEBUG][o.e.a.a.i.m.p.TransportPutMappingAction] [elasticsearch-data] connection exception while trying to forward request with action name [indices:admin/mapping/put] to master node [{elasticsearch}{G2cl2gwLTQaQUY4Oca5bHg}{5GA7XQjcTKWJuGAb1wwILg}{10.0.0.2}{10.0.0.2:9300}], scheduling a retry. Error: [org.elasticsearch.transport.NodeNotConnectedException: [elasticsearch][10.0.0.2:9300] Node not connected]\r\nFeb 22 10:49:32 pc elasticsearch[10964]: [2017-02-22T10:49:32,454][INFO ][o.e.d.z.ZenDiscovery ] [elasticsearch-data] master_left [{elasticsearch}{G2cl2gwLTQaQUY4Oca5bHg}{5GA7XQjcTKWJuGAb1wwILg}{10.0.0.2}{10.0.0.2:9300}], reason [transport disconnected]\r\nFeb 22 10:49:32 pc elasticsearch[10964]: [2017-02-22T10:49:32,456][WARN ][o.e.d.z.ZenDiscovery ] [elasticsearch-data] master left (reason = transport disconnected), current nodes: nodes:\r\nFeb 22 10:49:32 pc elasticsearch[10964]: {elasticsearch}{G2cl2gwLTQaQUY4Oca5bHg}{5GA7XQjcTKWJuGAb1wwILg}{10.0.0.2}{10.0.0.2:9300}, master\r\nFeb 22 10:49:32 pc elasticsearch[10964]: {elasticsearch-data}{9L57Rh05QvqXvC9LqsUvSg}{-WGBky5yQt2VC1ngcoy89g}{10.0.0.3}{10.0.0.3:9300}, local\r\nFeb 22 10:49:32 pc elasticsearch[10964]: \r\nFeb 22 10:49:35 pc elasticsearch[10964]: [2017-02-22T10:49:35,625][WARN ][o.e.m.j.JvmGcMonitorService] [elasticsearch-data] [gc][young][336][2] duration [3.1s], collections [1]/[3.5s], total [3.1s]/[3.2s], memory [558.4mb]->[312.1mb]/[1.9gb], all_pools {[young] [511.1mb]->[4.7mb]/[532.5mb]}{[survivor] [47.2mb]->[66.5mb]/[66.5mb]}{[old] [0b]->[244.3mb]/[1.3gb]}\r\nFeb 22 10:49:35 pc elasticsearch[10964]: [2017-02-22T10:49:35,632][WARN ][o.e.m.j.JvmGcMonitorService] [elasticsearch-data] [gc][336] overhead, spent [3.1s] collecting in the last [3.5s]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: [2017-02-22T10:49:35,658][WARN ][o.e.c.NodeConnectionsService] [elasticsearch-data] failed to connect to node {elasticsearch}{G2cl2gwLTQaQUY4Oca5bHg}{5GA7XQjcTKWJuGAb1wwILg}{10.0.0.2}{10.0.0.2:9300} (tried [1] times)\r\nFeb 22 10:49:35 pc elasticsearch[10964]: org.elasticsearch.transport.ConnectTransportException: [elasticsearch][10.0.0.2:9300] connect_timeout[30s]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.transport.netty4.Netty4Transport.connectToChannels(Netty4Transport.java:370) ~[?:?]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.transport.TcpTransport.openConnection(TcpTransport.java:495) ~[elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.transport.TcpTransport.connectToNode(TcpTransport.java:460) ~[elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.transport.TransportService.connectToNode(TransportService.java:318) ~[elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at 
org.elasticsearch.transport.TransportService.connectToNode(TransportService.java:305) ~[elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.cluster.NodeConnectionsService.validateNodeConnected(NodeConnectionsService.java:121) [elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.cluster.NodeConnectionsService.connectToNodes(NodeConnectionsService.java:87) [elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.cluster.service.ClusterService.publishAndApplyChanges(ClusterService.java:775) [elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.cluster.service.ClusterService.runTasks(ClusterService.java:628) [elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.cluster.service.ClusterService$UpdateTask.run(ClusterService.java:1112) [elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:527) [elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:238) [elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:201) [elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_121]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_121]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at java.lang.Thread.run(Thread.java:745) [?:1.8.0_121]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: Caused by: io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: 10.0.0.2/10.0.0.2:9300\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method) ~[?:?]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:717) ~[?:?]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at io.netty.channel.socket.nio.NioSocketChannel.doFinishConnect(NioSocketChannel.java:346) ~[?:?]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.finishConnect(AbstractNioChannel.java:340) ~[?:?]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:630) ~[?:?]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:527) ~[?:?]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:481) ~[?:?]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:441) ~[?:?]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858) ~[?:?]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011... 
1 more\r\nFeb 22 10:49:38 pc elasticsearch[10964]: [2017-02-22T10:49:38,149][INFO ][o.e.n.Node ] [elasticsearch] initializing ...\r\nFeb 22 10:49:38 pc elasticsearch[10964]: [2017-02-22T10:49:38,258][INFO ][o.e.e.NodeEnvironment ] [elasticsearch] using [1] data paths, mounts [[/usr/share/elasticsearch/data (/dev/mapper/Disk1-root)]], net usable_space [2.4gb], net total_space [8.3gb], spins? [possibly], types [ext4]\r\nFeb 22 10:49:38 pc elasticsearch[10964]: [2017-02-22T10:49:38,259][INFO ][o.e.e.NodeEnvironment ] [elasticsearch] heap size [1.9gb], compressed ordinary object pointers [true]\r\nFeb 22 10:49:38 pc elasticsearch[10964]: [2017-02-22T10:49:38,269][INFO ][o.e.n.Node ] [elasticsearch] node name [elasticsearch], node ID [G2cl2gwLTQaQUY4Oca5bHg]\r\nFeb 22 10:49:38 pc elasticsearch[10964]: [2017-02-22T10:49:38,271][INFO ][o.e.n.Node ] [elasticsearch] version[5.2.1], pid[1], build[db0d481/2017-02-09T22:05:32.386Z], OS[Linux/4.4.0-21-generic/amd64], JVM[Oracle Corporation/OpenJDK 64-Bit Server VM/1.8.0_121/25.121-b13]\r\n...\r\nFeb 22 10:49:27 pc elasticsearch[10964]: [2017-02-22T10:49:27,708][INFO ][o.e.c.m.MetaDataCreateIndexService] [elasticsearch] [group:app1@2017-02-22-10] creating index, cause [auto(bulk api)], templates [], shards [5]/[1], mappings []\r\nFeb 22 10:49:31 pc elasticsearch[10964]: [2017-02-22T10:49:30,984][WARN ][o.e.m.j.JvmGcMonitorService] [elasticsearch] [gc][young][356][2] duration [2.7s], collections [1]/[3.1s], total [2.7s]/[2.9s], memory [568.9mb]->[298.8mb]/[1.9gb], all_pools {[young] [521.6mb]->[7.4mb]/[532.5mb]}{[survivor] [47.2mb]->[66.4mb]/[66.5mb]}{[old] [0b]->[228.3mb]/[1.3gb]}\r\nFeb 22 10:49:31 pc elasticsearch[10964]: [2017-02-22T10:49:31,021][WARN ][o.e.m.j.JvmGcMonitorService] [elasticsearch] [gc][356] overhead, spent [2.7s] collecting in the last [3.1s]\r\nFeb 22 10:49:31 pc elasticsearch[10964]: [2017-02-22T10:49:31,079][INFO ][o.e.c.m.MetaDataMappingService] [elasticsearch] [group:app1@2017-02-22-10/NpYVbZxHQuG0far9dfcHsQ] create_mapping [logs]\r\nFeb 22 10:49:31 pc elasticsearch[10964]: [2017-02-22T10:49:31,102][INFO ][o.e.c.m.MetaDataMappingService] [elasticsearch] [group:app1@2017-02-22-10/NpYVbZxHQuG0far9dfcHsQ] update_mapping [logs]\r\nFeb 22 10:49:31 pc elasticsearch[10964]: [2017-02-22T10:49:31,152][INFO ][o.e.c.m.MetaDataMappingService] [elasticsearch] [group:app1@2017-02-22-10/NpYVbZxHQuG0far9dfcHsQ] update_mapping [logs]\r\nFeb 22 10:49:31 pc elasticsearch[10964]: [2017-02-22T10:49:31,755][INFO ][o.e.c.m.MetaDataMappingService] [elasticsearch] [group:app1@2017-02-22-10/NpYVbZxHQuG0far9dfcHsQ] update_mapping [logs]\r\nFeb 22 10:49:32 pc elasticsearch[10964]: [2017-02-22T10:49:32,381][DEBUG][o.e.a.a.i.m.p.TransportPutMappingAction] [elasticsearch-data] connection exception while trying to forward request with action name [indices:admin/mapping/put] to master node [{elasticsearch}{G2cl2gwLTQaQUY4Oca5bHg}{5GA7XQjcTKWJuGAb1wwILg}{10.0.0.2}{10.0.0.2:9300}], scheduling a retry. Error: [org.elasticsearch.transport.NodeDisconnectedException: [elasticsearch][10.0.0.2:9300][indices:admin/mapping/put] disconnected]\r\nFeb 22 10:49:32 pc elasticsearch[10964]: [2017-02-22T10:49:32,381][DEBUG][o.e.a.a.i.m.p.TransportPutMappingAction] [elasticsearch-data] connection exception while trying to forward request with action name [indices:admin/mapping/put] to master node [{elasticsearch}{G2cl2gwLTQaQUY4Oca5bHg}{5GA7XQjcTKWJuGAb1wwILg}{10.0.0.2}{10.0.0.2:9300}], scheduling a retry. 
Error: [org.elasticsearch.transport.NodeDisconnectedException: [elasticsearch][10.0.0.2:9300][indices:admin/mapping/put] disconnected]\r\nFeb 22 10:49:32 pc elasticsearch[10964]: [2017-02-22T10:49:32,432][DEBUG][o.e.a.a.i.m.p.TransportPutMappingAction] [elasticsearch-data] connection exception while trying to forward request with action name [indices:admin/mapping/put] to master node [{elasticsearch}{G2cl2gwLTQaQUY4Oca5bHg}{5GA7XQjcTKWJuGAb1wwILg}{10.0.0.2}{10.0.0.2:9300}], scheduling a retry. Error: [org.elasticsearch.transport.NodeNotConnectedException: [elasticsearch][10.0.0.2:9300] Node not connected]\r\nFeb 22 10:49:32 pc elasticsearch[10964]: [2017-02-22T10:49:32,432][DEBUG][o.e.a.a.i.m.p.TransportPutMappingAction] [elasticsearch-data] connection exception while trying to forward request with action name [indices:admin/mapping/put] to master node [{elasticsearch}{G2cl2gwLTQaQUY4Oca5bHg}{5GA7XQjcTKWJuGAb1wwILg}{10.0.0.2}{10.0.0.2:9300}], scheduling a retry. Error: [org.elasticsearch.transport.NodeNotConnectedException: [elasticsearch][10.0.0.2:9300] Node not connected]\r\nFeb 22 10:49:32 pc elasticsearch[10964]: [2017-02-22T10:49:32,435][DEBUG][o.e.a.a.i.m.p.TransportPutMappingAction] [elasticsearch-data] connection exception while trying to forward request with action name [indices:admin/mapping/put] to master node [{elasticsearch}{G2cl2gwLTQaQUY4Oca5bHg}{5GA7XQjcTKWJuGAb1wwILg}{10.0.0.2}{10.0.0.2:9300}], scheduling a retry. Error: [org.elasticsearch.transport.NodeNotConnectedException: [elasticsearch][10.0.0.2:9300] Node not connected]\r\nFeb 22 10:49:32 pc elasticsearch[10964]: [2017-02-22T10:49:32,454][INFO ][o.e.d.z.ZenDiscovery ] [elasticsearch-data] master_left [{elasticsearch}{G2cl2gwLTQaQUY4Oca5bHg}{5GA7XQjcTKWJuGAb1wwILg}{10.0.0.2}{10.0.0.2:9300}], reason [transport disconnected]\r\nFeb 22 10:49:32 pc elasticsearch[10964]: [2017-02-22T10:49:32,456][WARN ][o.e.d.z.ZenDiscovery ] [elasticsearch-data] master left (reason = transport disconnected), current nodes: nodes:\r\nFeb 22 10:49:32 pc elasticsearch[10964]: {elasticsearch}{G2cl2gwLTQaQUY4Oca5bHg}{5GA7XQjcTKWJuGAb1wwILg}{10.0.0.2}{10.0.0.2:9300}, master\r\nFeb 22 10:49:32 pc elasticsearch[10964]: {elasticsearch-data}{9L57Rh05QvqXvC9LqsUvSg}{-WGBky5yQt2VC1ngcoy89g}{10.0.0.3}{10.0.0.3:9300}, local\r\nFeb 22 10:49:32 pc elasticsearch[10964]: \r\nFeb 22 10:49:35 pc elasticsearch[10964]: [2017-02-22T10:49:35,625][WARN ][o.e.m.j.JvmGcMonitorService] [elasticsearch-data] [gc][young][336][2] duration [3.1s], collections [1]/[3.5s], total [3.1s]/[3.2s], memory [558.4mb]->[312.1mb]/[1.9gb], all_pools {[young] [511.1mb]->[4.7mb]/[532.5mb]}{[survivor] [47.2mb]->[66.5mb]/[66.5mb]}{[old] [0b]->[244.3mb]/[1.3gb]}\r\nFeb 22 10:49:35 pc elasticsearch[10964]: [2017-02-22T10:49:35,632][WARN ][o.e.m.j.JvmGcMonitorService] [elasticsearch-data] [gc][336] overhead, spent [3.1s] collecting in the last [3.5s]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: [2017-02-22T10:49:35,658][WARN ][o.e.c.NodeConnectionsService] [elasticsearch-data] failed to connect to node {elasticsearch}{G2cl2gwLTQaQUY4Oca5bHg}{5GA7XQjcTKWJuGAb1wwILg}{10.0.0.2}{10.0.0.2:9300} (tried [1] times)\r\nFeb 22 10:49:35 pc elasticsearch[10964]: org.elasticsearch.transport.ConnectTransportException: [elasticsearch][10.0.0.2:9300] connect_timeout[30s]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.transport.netty4.Netty4Transport.connectToChannels(Netty4Transport.java:370) ~[?:?]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at 
org.elasticsearch.transport.TcpTransport.openConnection(TcpTransport.java:495) ~[elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.transport.TcpTransport.connectToNode(TcpTransport.java:460) ~[elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.transport.TransportService.connectToNode(TransportService.java:318) ~[elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.transport.TransportService.connectToNode(TransportService.java:305) ~[elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.cluster.NodeConnectionsService.validateNodeConnected(NodeConnectionsService.java:121) [elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.cluster.NodeConnectionsService.connectToNodes(NodeConnectionsService.java:87) [elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.cluster.service.ClusterService.publishAndApplyChanges(ClusterService.java:775) [elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.cluster.service.ClusterService.runTasks(ClusterService.java:628) [elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.cluster.service.ClusterService$UpdateTask.run(ClusterService.java:1112) [elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:527) [elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:238) [elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:201) [elasticsearch-5.2.1.jar:5.2.1]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_121]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_121]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at java.lang.Thread.run(Thread.java:745) [?:1.8.0_121]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: Caused by: io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: 10.0.0.2/10.0.0.2:9300\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method) ~[?:?]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:717) ~[?:?]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at io.netty.channel.socket.nio.NioSocketChannel.doFinishConnect(NioSocketChannel.java:346) ~[?:?]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.finishConnect(AbstractNioChannel.java:340) ~[?:?]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:630) ~[?:?]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:527) ~[?:?]\r\nFeb 22 
10:49:35 pc elasticsearch[10964]: #011at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:481) ~[?:?]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:441) ~[?:?]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858) ~[?:?]\r\nFeb 22 10:49:35 pc elasticsearch[10964]: #011... 1 more\r\nFeb 22 10:49:38 pc elasticsearch[10964]: [2017-02-22T10:49:38,149][INFO ][o.e.n.Node ] [elasticsearch] initializing ...\r\nFeb 22 10:49:38 pc elasticsearch[10964]: [2017-02-22T10:49:38,258][INFO ][o.e.e.NodeEnvironment ] [elasticsearch] using [1] data paths, mounts [[/usr/share/elasticsearch/data (/dev/mapper/Disk1-root)]], net usable_space [2.4gb], net total_space [8.3gb], spins? [possibly], types [ext4]\r\nFeb 22 10:49:38 pc elasticsearch[10964]: [2017-02-22T10:49:38,259][INFO ][o.e.e.NodeEnvironment ] [elasticsearch] heap size [1.9gb], compressed ordinary object pointers [true]\r\nFeb 22 10:49:38 pc elasticsearch[10964]: [2017-02-22T10:49:38,269][INFO ][o.e.n.Node ] [elasticsearch] node name [elasticsearch], node ID [G2cl2gwLTQaQUY4Oca5bHg]\r\nFeb 22 10:49:38 pc elasticsearch[10964]: [2017-02-22T10:49:38,271][INFO ][o.e.n.Node ] [elasticsearch] version[5.2.1], pid[1], build[db0d481/2017-02-09T22:05:32.386Z], OS[Linux/4.4.0-21-generic/amd64], JVM[Oracle Corporation/OpenJDK 64-Bit Server VM/1.8.0_121/25.121-b13]\r\n```\r\n\r\ndoes it sound familiar to you?\r\n\r\n@s1monw among other things, the first to complain was cerebro.\r\n\r\nThanks and sorry for rebumping", "created_at": "2017-02-22T09:54:28Z" }, { "body": "@mostolog I think that your node is GCing and that's leading to disconnects and timeouts:\r\n\r\n```\r\nFeb 22 10:49:31 pc elasticsearch[10964]: [2017-02-22T10:49:30,984][WARN ][o.e.m.j.JvmGcMonitorService] [elasticsearch] [gc][young][356][2] duration [2.7s], collections [1]/[3.1s], total [2.7s]/[2.9s], memory [568.9mb]->[298.8mb]/[1.9gb], all_pools {[young] [521.6mb]->[7.4mb]/[532.5mb]}{[survivor] [47.2mb]->[66.4mb]/[66.5mb]}{[old] [0b]->[228.3mb]/[1.3gb]}\r\nFeb 22 10:49:31 pc elasticsearch[10964]: [2017-02-22T10:49:31,021][WARN ][o.e.m.j.JvmGcMonitorService] [elasticsearch] [gc][356] overhead, spent [2.7s] collecting in the last [3.1s]\r\n```", "created_at": "2017-02-22T19:29:45Z" }, { "body": "@jasontedor Any way to avoid GC having such an effect on my node?", "created_at": "2017-02-23T09:08:19Z" }, { "body": "@mostolog please use the [forum](https://discuss.elastic.co) for questions like this. Your issue is unrelated to this.", "created_at": "2017-02-23T09:15:18Z" }, { "body": "Shouldn't this bug NOT have tag 5.1.1?", "created_at": "2017-04-10T05:44:59Z" }, { "body": "> Shouldn't this bug NOT have tag 5.1.1?\r\n\r\nWhy do you say that @astefan? If you're looking for the versions that the issue is fixed in, you have to look at the labels on the corresponding PR: #22317.", "created_at": "2017-04-10T10:14:07Z" }, { "body": "I thought the labels on the GH issue show the fixed versions, not the affected ones?... I may be though remembering wrong :-).", "created_at": "2017-04-10T10:16:07Z" }, { "body": "> I thought the labels on the GH issue show the fixed versions, not the affected ones?... 
I may be though remembering wrong :-).\r\n\r\nThe version labels on corresponding PRs mean that.", "created_at": "2017-04-10T10:35:17Z" }, { "body": "do you have the plan to fix this problem in version 2.4.5 @jasontedor @nik9000 \r\nwe see this bug in our es cluster with version 2.4.5。\r\n\r\n \r\n![image](https://user-images.githubusercontent.com/1811544/33199834-0ba2c89c-d12d-11e7-9782-1b221a852200.png)\r\n\r\n![image](https://user-images.githubusercontent.com/1811544/33199264-8a2bb5e6-d12a-11e7-934c-9b75fbd68d94.png)\r\n\r\n![image](https://user-images.githubusercontent.com/1811544/33199150-043e888c-d12a-11e7-845f-0b17f7ef2d92.png)\r\n", "created_at": "2017-11-24T07:09:30Z" }, { "body": "@hjxhjh As I explained [yesterday](https://github.com/elastic/elasticsearch/pull/22317#issuecomment-346644676), this bug is not present in 2.x and the 2.x series is no longer under maintenance.\r\n\r\nWhat you are seeing in your cluster has the same symptoms as the bug here, but I assure you that the cause is not the same, it's a different bug. In the same way that you can be vomiting (the symptom) because of the flu (a bug), or because of something you ate (another bug), seeing the same symptoms does not mean the cause is the same.\r\n\r\nHowever, we will not be putting any time into investigating this bug, nor will be releasing a fix for this bug: the 2.x series is no longer under maintenance.\r\n", "created_at": "2017-11-24T13:02:04Z" }, { "body": "got it. thank you @jasontedor .", "created_at": "2017-11-26T07:26:59Z" }, { "body": "See pretty the same stacktrace in ElasticSearch v6.1.2, could you look into it the same issue or not?\r\n\r\n```[2018-09-02T16:11:59,836][WARN ][o.e.t.n.Netty4Transport ] [*******] exception caught on transport layer [org.elasticsearch.transport.netty4.NettyTcpChannel@1a394567], closing connection\r\njava.lang.IllegalStateException: Message not fully read (response) for requestId [1096618678], handler [org.elasticsearch.transport.TransportService$ContextRestoreResponseHandler/org.elasticsearch.action.support.nodes.TransportNodesAction$AsyncAction$1@1d96eb45], error [false]; resetting\r\n\tat org.elasticsearch.transport.TcpTransport.messageReceived(TcpTransport.java:1405) ~[elasticsearch-6.1.2.jar:6.1.2]\r\n\tat org.elasticsearch.transport.netty4.Netty4MessageChannelHandler.channelRead(Netty4MessageChannelHandler.java:64) ~[transport-netty4-6.1.2.jar:6.1.2]\r\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362) [netty-transport-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348) [netty-transport-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340) [netty-transport-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310) [netty-codec-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:297) [netty-codec-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:413) [netty-codec-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:265) [netty-codec-4.1.13.Final.jar:4.1.13.Final]\r\n\tat 
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362) [netty-transport-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348) [netty-transport-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340) [netty-transport-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.handler.logging.LoggingHandler.channelRead(LoggingHandler.java:241) [netty-handler-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362) [netty-transport-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348) [netty-transport-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340) [netty-transport-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334) [netty-transport-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362) [netty-transport-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348) [netty-transport-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926) [netty-transport-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134) [netty-transport-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644) [netty-transport-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:544) [netty-transport-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:498) [netty-transport-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458) [netty-transport-4.1.13.Final.jar:4.1.13.Final]\r\n\tat io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858) [netty-common-4.1.13.Final.jar:4.1.13.Final]\r\n\tat java.lang.Thread.run(Thread.java:748) [?:1.8.0_161]```", "created_at": "2018-09-06T14:00:08Z" } ], "number": 22285, "title": "Broken stats serialization" }
{ "body": "Today we try to pull stats from index writer but we do not get a\r\nconsistent view of stats. Under heavy indexing, this inconsistency can\r\nbe very skewed indeed. In particular, it can lead to the number of\r\ndeleted docs being reported as negative and this leads to serialization\r\nissues. Instead, we should provide a consistent view of the stats by\r\nusing an index reader.\r\n\r\nCloses #22285", "number": 22317, "review_comments": [ { "body": "Maybe wait for https://github.com/elastic/elasticsearch/pull/22314?", "created_at": "2016-12-21T22:02:28Z" }, { "body": "Yeah, that's fine with me. I just needed this to fail with something that would get caught and sent back in the response instead of uncaught causing the request to timeout. I'll take a look at #22314 now.", "created_at": "2016-12-21T22:04:08Z" }, { "body": "can we keep a simple test in `IndexShardTests` pls", "created_at": "2016-12-22T06:57:46Z" }, { "body": "I pushed a test in f3f650624e0cb43de62626e8b15da3163c1fceb9.", "created_at": "2016-12-22T16:05:42Z" }, { "body": "Instead of saying \"variable-length long\" I'd `{@link StreamOutput#writeVLong}`.\r\n", "created_at": "2016-12-22T19:07:17Z" }, { "body": "numberOf**Indexing**Threads", "created_at": "2016-12-22T19:09:18Z" } ], "title": "Use reader for doc stats" }
{ "commits": [ { "message": "Use reader for doc stats\n\nToday we try to pull stats from index writer but we do not get a\nconsistent view of stats. Under heavy indexing, this inconsistency can\nbe very skewed indeed. In particular, it can lead to the number of\ndeleted docs being reported as negative and this leads to serialization\nissues. Instead, we should provide a consistent view of the stats by\nusing an index reader." }, { "message": "Simplify concurrent indexing and get stats test\n\nThis commit simplifies a test that concurrently indexes while getting\nstats. The simplification is the removal of an unnecessary thread." }, { "message": "Add basic doc stats test\n\nThis commit adds a basic doc stats test at the index shard level." }, { "message": "Merge branch 'master' into use-reader-for-doc-stats\n\n* master: (22 commits)\n Support negative numbers in writeVLong (#22314)\n UnicastZenPing's PingingRound should prevent opening connections after being closed\n Add task to clean idea build directory. Make cleanIdea task invoke it.\n add trace logging to UnicastZenPingTests.testResolveReuseExistingNodeConnections\n Adds ingest processor headers to exception for unknown processor. (#22315)\n Remove much ceremony from parsing client yaml test suites (#22311)\n Support numeric bounds with decimal parts for long/integer/short/byte datatypes (#21972)\n inner hits: Don't inline inner hits if the query the inner hits is inlined into can't resolve mappings and ignore_unmapped has been set to true\n Fix stackoverflow error on InternalNumericMetricAggregation\n Date detection should not rely on a hardcoded set of characters. (#22171)\n `value_type` is useful regardless of scripting. (#22160)\n Improve concurrency of ShardCoreKeyMap. (#22316)\n fixed jdocs and removed already fixed norelease\n Adds abstract test classes for serialisation (#22281)\n Introduce translog no-op\n Provide helpful error message if a plugin exists\n Clear static variable after suite\n Repeated language analyzers (#22240)\n Restore deprecation warning for invalid match_mapping_type values (#22304)\n Make `-0` compare less than `+0` consistently. (#22173)\n ..." }, { "message": "Merge branch 'master' into use-reader-for-doc-stats\n\n* master:\n Enable assertions in integration tests" }, { "message": "Refresh before executing rollover\n\nThis commit adds a refresh before executing a rollover, otherwise the\nsingle indexed doc might not be visible to the rollover command." 
} ], "files": [ { "diff": "@@ -64,7 +64,6 @@\n import org.elasticsearch.index.mapper.Uid;\n import org.elasticsearch.index.merge.MergeStats;\n import org.elasticsearch.index.seqno.SequenceNumbersService;\n-import org.elasticsearch.index.shard.DocsStats;\n import org.elasticsearch.index.shard.ShardId;\n import org.elasticsearch.index.store.Store;\n import org.elasticsearch.index.translog.Translog;\n@@ -1374,16 +1373,6 @@ public long getLastWriteNanos() {\n return this.lastWriteNanos;\n }\n \n- /**\n- * Returns the engines current document statistics\n- */\n- public DocsStats getDocStats() {\n- try (Engine.Searcher searcher = acquireSearcher(\"doc_stats\")) {\n- IndexReader reader = searcher.reader();\n- return new DocsStats(reader.numDocs(), reader.numDeletedDocs());\n- }\n- }\n-\n /**\n * Called for each new opened engine searcher to warm new segments\n *", "filename": "core/src/main/java/org/elasticsearch/index/engine/Engine.java", "status": "modified" }, { "diff": "@@ -64,7 +64,6 @@\n import org.elasticsearch.index.merge.OnGoingMerge;\n import org.elasticsearch.index.seqno.SeqNoStats;\n import org.elasticsearch.index.seqno.SequenceNumbersService;\n-import org.elasticsearch.index.shard.DocsStats;\n import org.elasticsearch.index.shard.ElasticsearchMergePolicy;\n import org.elasticsearch.index.shard.ShardId;\n import org.elasticsearch.index.shard.TranslogRecoveryPerformer;\n@@ -1620,14 +1619,6 @@ public SequenceNumbersService seqNoService() {\n return seqNoService;\n }\n \n- @Override\n- public DocsStats getDocStats() {\n- final int numDocs = indexWriter.numDocs();\n- final int maxDoc = indexWriter.maxDoc();\n- return new DocsStats(numDocs, maxDoc-numDocs);\n- }\n-\n-\n /**\n * Returns the number of times a version was looked up either from the index.\n * Note this is only available if assertions are enabled", "filename": "core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java", "status": "modified" }, { "diff": "@@ -669,9 +669,9 @@ public FlushStats flushStats() {\n }\n \n public DocsStats docStats() {\n- readAllowed();\n- final Engine engine = getEngine();\n- return engine.getDocStats();\n+ try (final Engine.Searcher searcher = acquireSearcher(\"doc_stats\")) {\n+ return new DocsStats(searcher.reader().numDocs(), searcher.reader().numDeletedDocs());\n+ }\n }\n \n /**", "filename": "core/src/main/java/org/elasticsearch/index/shard/IndexShard.java", "status": "modified" }, { "diff": "@@ -2548,33 +2548,6 @@ public void testHandleDocumentFailure() throws Exception {\n \n }\n \n- public void testDocStats() throws IOException {\n- final int numDocs = randomIntBetween(2, 10); // at least 2 documents otherwise we don't see any deletes below\n- for (int i = 0; i < numDocs; i++) {\n- ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), \"test\", null, testDocument(), new BytesArray(\"{}\"), null);\n- Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);\n- Engine.IndexResult indexResult = engine.index(firstIndexRequest);\n- assertThat(indexResult.getVersion(), equalTo(1L));\n- }\n- DocsStats docStats = engine.getDocStats();\n- assertEquals(numDocs, docStats.getCount());\n- assertEquals(0, docStats.getDeleted());\n- engine.forceMerge(randomBoolean(), 1, false, false, false);\n-\n- ParsedDocument doc = testParsedDocument(Integer.toString(0), Integer.toString(0), \"test\", null, testDocument(), new 
BytesArray(\"{}\"), null);\n- Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(0)), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);\n- Engine.IndexResult index = engine.index(firstIndexRequest);\n- assertThat(index.getVersion(), equalTo(2L));\n- engine.flush(); // flush - buffered deletes are not counted\n- docStats = engine.getDocStats();\n- assertEquals(1, docStats.getDeleted());\n- assertEquals(numDocs, docStats.getCount());\n- engine.forceMerge(randomBoolean(), 1, false, false, false);\n- docStats = engine.getDocStats();\n- assertEquals(0, docStats.getDeleted());\n- assertEquals(numDocs, docStats.getCount());\n- }\n-\n public void testDoubleDelivery() throws IOException {\n final ParsedDocument doc = testParsedDocument(\"1\", \"1\", \"test\", null, testDocumentWithTextField(), new BytesArray(\"{}\".getBytes(Charset.defaultCharset())), null);\n Engine.Index operation = randomAppendOnly(1, doc, false);", "filename": "core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java", "status": "modified" }, { "diff": "@@ -984,33 +984,6 @@ public void testNoTranslog() {\n }\n }\n \n- public void testDocStats() throws IOException {\n- final int numDocs = randomIntBetween(2, 10); // at least 2 documents otherwise we don't see any deletes below\n- for (int i = 0; i < numDocs; i++) {\n- ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), \"test\", null, testDocument(), new BytesArray(\"{}\"), null);\n- Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);\n- Engine.IndexResult indexResult = primaryEngine.index(firstIndexRequest);\n- assertThat(indexResult.getVersion(), equalTo(1L));\n- }\n- DocsStats docStats = primaryEngine.getDocStats();\n- assertEquals(numDocs, docStats.getCount());\n- assertEquals(0, docStats.getDeleted());\n-\n- docStats = replicaEngine.getDocStats();\n- assertEquals(0, docStats.getCount());\n- assertEquals(0, docStats.getDeleted());\n- primaryEngine.flush();\n-\n- docStats = replicaEngine.getDocStats();\n- assertEquals(0, docStats.getCount());\n- assertEquals(0, docStats.getDeleted());\n- replicaEngine.refresh(\"test\");\n- docStats = replicaEngine.getDocStats();\n- assertEquals(numDocs, docStats.getCount());\n- assertEquals(0, docStats.getDeleted());\n- primaryEngine.forceMerge(randomBoolean(), 1, false, false, false);\n- }\n-\n public void testRefreshListenersFails() throws IOException {\n EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(),\n new RefreshListeners(null, null, null, logger));", "filename": "core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java", "status": "modified" }, { "diff": "@@ -33,6 +33,7 @@\n import org.apache.lucene.util.Constants;\n import org.elasticsearch.Version;\n import org.elasticsearch.action.admin.indices.flush.FlushRequest;\n+import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;\n import org.elasticsearch.action.admin.indices.stats.CommonStats;\n import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;\n import org.elasticsearch.action.admin.indices.stats.ShardStats;\n@@ -57,11 +58,13 @@\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.lease.Releasable;\n import 
org.elasticsearch.common.lease.Releasables;\n+import org.elasticsearch.common.lucene.uid.Versions;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.util.concurrent.AbstractRunnable;\n import org.elasticsearch.common.util.concurrent.ConcurrentCollections;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.env.NodeEnvironment;\n+import org.elasticsearch.index.VersionType;\n import org.elasticsearch.index.engine.Engine;\n import org.elasticsearch.index.engine.EngineException;\n import org.elasticsearch.index.fielddata.FieldDataStats;\n@@ -73,6 +76,7 @@\n import org.elasticsearch.index.mapper.SeqNoFieldMapper;\n import org.elasticsearch.index.mapper.Uid;\n import org.elasticsearch.index.mapper.UidFieldMapper;\n+import org.elasticsearch.index.seqno.SequenceNumbersService;\n import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;\n import org.elasticsearch.index.store.Store;\n import org.elasticsearch.index.translog.Translog;\n@@ -112,13 +116,15 @@\n import java.util.concurrent.atomic.AtomicInteger;\n import java.util.concurrent.atomic.AtomicReference;\n import java.util.function.BiConsumer;\n+import java.util.stream.Collectors;\n+import java.util.stream.IntStream;\n \n import static java.util.Collections.emptyMap;\n import static java.util.Collections.emptySet;\n import static org.elasticsearch.common.lucene.Lucene.cleanLuceneIndex;\n-import static org.elasticsearch.common.lucene.Lucene.readScoreDoc;\n import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS;\n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n+import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY;\n import static org.hamcrest.Matchers.containsString;\n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.greaterThan;\n@@ -1357,6 +1363,91 @@ public void testRecoverFromLocalShard() throws IOException {\n closeShards(sourceShard, targetShard);\n }\n \n+ public void testDocStats() throws IOException {\n+ IndexShard indexShard = null;\n+ try {\n+ indexShard = newStartedShard();\n+ final long numDocs = randomIntBetween(2, 32); // at least two documents so we have docs to delete\n+ final long numDocsToDelete = randomIntBetween(1, Math.toIntExact(numDocs));\n+ for (int i = 0; i < numDocs; i++) {\n+ final String id = Integer.toString(i);\n+ final ParsedDocument doc =\n+ testParsedDocument(id, id, \"test\", null, new ParseContext.Document(), new BytesArray(\"{}\"), null);\n+ final Engine.Index index =\n+ new Engine.Index(\n+ new Term(\"_uid\", id),\n+ doc,\n+ SequenceNumbersService.UNASSIGNED_SEQ_NO,\n+ 0,\n+ Versions.MATCH_ANY,\n+ VersionType.INTERNAL,\n+ PRIMARY,\n+ System.nanoTime(),\n+ -1,\n+ false);\n+ final Engine.IndexResult result = indexShard.index(index);\n+ assertThat(result.getVersion(), equalTo(1L));\n+ }\n+\n+ indexShard.refresh(\"test\");\n+ {\n+ final DocsStats docsStats = indexShard.docStats();\n+ assertThat(docsStats.getCount(), equalTo(numDocs));\n+ assertThat(docsStats.getDeleted(), equalTo(0L));\n+ }\n+\n+ final List<Integer> ids = randomSubsetOf(\n+ Math.toIntExact(numDocsToDelete),\n+ IntStream.range(0, Math.toIntExact(numDocs)).boxed().collect(Collectors.toList()));\n+ for (final Integer i : ids) {\n+ final String id = Integer.toString(i);\n+ final ParsedDocument doc = testParsedDocument(id, id, \"test\", null, new ParseContext.Document(), new BytesArray(\"{}\"), null);\n+ final Engine.Index index =\n+ new Engine.Index(\n+ new 
Term(\"_uid\", id),\n+ doc,\n+ SequenceNumbersService.UNASSIGNED_SEQ_NO,\n+ 0,\n+ Versions.MATCH_ANY,\n+ VersionType.INTERNAL,\n+ PRIMARY,\n+ System.nanoTime(),\n+ -1,\n+ false);\n+ final Engine.IndexResult result = indexShard.index(index);\n+ assertThat(result.getVersion(), equalTo(2L));\n+ }\n+\n+ // flush the buffered deletes\n+ final FlushRequest flushRequest = new FlushRequest();\n+ flushRequest.force(false);\n+ flushRequest.waitIfOngoing(false);\n+ indexShard.flush(flushRequest);\n+\n+ indexShard.refresh(\"test\");\n+ {\n+ final DocsStats docStats = indexShard.docStats();\n+ assertThat(docStats.getCount(), equalTo(numDocs));\n+ assertThat(docStats.getDeleted(), equalTo(numDocsToDelete));\n+ }\n+\n+ // merge them away\n+ final ForceMergeRequest forceMergeRequest = new ForceMergeRequest();\n+ forceMergeRequest.onlyExpungeDeletes(randomBoolean());\n+ forceMergeRequest.maxNumSegments(1);\n+ indexShard.forceMerge(forceMergeRequest);\n+\n+ indexShard.refresh(\"test\");\n+ {\n+ final DocsStats docStats = indexShard.docStats();\n+ assertThat(docStats.getCount(), equalTo(numDocs));\n+ assertThat(docStats.getDeleted(), equalTo(0L));\n+ }\n+ } finally {\n+ closeShards(indexShard);\n+ }\n+ }\n+\n /** A dummy repository for testing which just needs restore overridden */\n private abstract static class RestoreOnlyRepository extends AbstractLifecycleComponent implements Repository {\n private final String indexName;", "filename": "core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java", "status": "modified" }, { "diff": "@@ -21,16 +21,20 @@\n \n import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;\n import org.elasticsearch.action.DocWriteResponse;\n+import org.elasticsearch.action.ShardOperationFailedException;\n import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;\n+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;\n import org.elasticsearch.action.admin.indices.stats.CommonStats;\n import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;\n import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag;\n import org.elasticsearch.action.admin.indices.stats.IndexStats;\n+import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;\n import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;\n import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;\n import org.elasticsearch.action.admin.indices.stats.ShardStats;\n import org.elasticsearch.action.get.GetResponse;\n import org.elasticsearch.action.index.IndexRequestBuilder;\n+import org.elasticsearch.action.index.IndexResponse;\n import org.elasticsearch.action.search.SearchType;\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.bytes.BytesReference;\n@@ -54,14 +58,27 @@\n import org.elasticsearch.test.ESIntegTestCase.Scope;\n \n import java.io.IOException;\n+import java.util.ArrayList;\n+import java.util.Arrays;\n import java.util.EnumSet;\n+import java.util.List;\n import java.util.Random;\n+import java.util.concurrent.BrokenBarrierException;\n+import java.util.concurrent.CopyOnWriteArrayList;\n+import java.util.concurrent.CountDownLatch;\n+import java.util.concurrent.CyclicBarrier;\n+import java.util.concurrent.ExecutionException;\n+import java.util.concurrent.TimeUnit;\n+import java.util.concurrent.atomic.AtomicBoolean;\n+import java.util.concurrent.atomic.AtomicInteger;\n+import java.util.concurrent.atomic.AtomicReference;\n \n import static 
org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;\n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;\n+import static org.hamcrest.Matchers.emptyCollectionOf;\n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.greaterThan;\n import static org.hamcrest.Matchers.is;\n@@ -1068,4 +1085,103 @@ public void testFilterCacheStats() throws Exception {\n assertThat(response.getTotal().queryCache.getMemorySizeInBytes(), equalTo(0L));\n }\n \n+ /**\n+ * Test that we can safely concurrently index and get stats. This test was inspired by a serialization issue that arose due to a race\n+ * getting doc stats during heavy indexing. The race could lead to deleted docs being negative which would then be serialized as a\n+ * variable-length long. Since serialization of negative longs using a variable-length format was unsupported\n+ * ({@link org.elasticsearch.common.io.stream.StreamOutput#writeVLong(long)}), the stream would become corrupted. Here, we want to test\n+ * that we can continue to get stats while indexing.\n+ */\n+ public void testConcurrentIndexingAndStatsRequests() throws BrokenBarrierException, InterruptedException, ExecutionException {\n+ final AtomicInteger idGenerator = new AtomicInteger();\n+ final int numberOfIndexingThreads = Runtime.getRuntime().availableProcessors();\n+ final int numberOfStatsThreads = 4 * numberOfIndexingThreads;\n+ final CyclicBarrier barrier = new CyclicBarrier(1 + numberOfIndexingThreads + numberOfStatsThreads);\n+ final AtomicBoolean stop = new AtomicBoolean();\n+ final List<Thread> threads = new ArrayList<>(numberOfIndexingThreads + numberOfIndexingThreads);\n+\n+ final CountDownLatch latch = new CountDownLatch(1);\n+ final AtomicBoolean failed = new AtomicBoolean();\n+ final AtomicReference<List<ShardOperationFailedException>> shardFailures = new AtomicReference<>(new CopyOnWriteArrayList<>());\n+ final AtomicReference<List<Exception>> executionFailures = new AtomicReference<>(new CopyOnWriteArrayList<>());\n+\n+ // increasing the number of shards increases the number of chances any one stats request will hit a race\n+ final CreateIndexRequest createIndexRequest =\n+ new CreateIndexRequest(\"test\", Settings.builder().put(\"index.number_of_shards\", 10).build());\n+ client().admin().indices().create(createIndexRequest).get();\n+\n+ // start threads that will index concurrently with stats requests\n+ for (int i = 0; i < numberOfIndexingThreads; i++) {\n+ final Thread thread = new Thread(() -> {\n+ try {\n+ barrier.await();\n+ } catch (final BrokenBarrierException | InterruptedException e) {\n+ failed.set(true);\n+ executionFailures.get().add(e);\n+ latch.countDown();\n+ }\n+ while (!stop.get()) {\n+ final String id = Integer.toString(idGenerator.incrementAndGet());\n+ final IndexResponse response =\n+ client()\n+ .prepareIndex(\"test\", \"type\", id)\n+ .setSource(\"{}\")\n+ .get();\n+ assertThat(response.getResult(), equalTo(DocWriteResponse.Result.CREATED));\n+ }\n+ });\n+ thread.setName(\"indexing-\" + i);\n+ threads.add(thread);\n+ thread.start();\n+ }\n+\n+ // start threads that will get stats concurrently with indexing\n+ for (int i = 0; i < numberOfStatsThreads; i++) {\n+ final Thread thread = new 
Thread(() -> {\n+ try {\n+ barrier.await();\n+ } catch (final BrokenBarrierException | InterruptedException e) {\n+ failed.set(true);\n+ executionFailures.get().add(e);\n+ latch.countDown();\n+ }\n+ final IndicesStatsRequest request = new IndicesStatsRequest();\n+ request.all();\n+ request.indices(new String[0]);\n+ while (!stop.get()) {\n+ try {\n+ final IndicesStatsResponse response = client().admin().indices().stats(request).get();\n+ if (response.getFailedShards() > 0) {\n+ failed.set(true);\n+ shardFailures.get().addAll(Arrays.asList(response.getShardFailures()));\n+ latch.countDown();\n+ }\n+ } catch (final ExecutionException | InterruptedException e) {\n+ failed.set(true);\n+ executionFailures.get().add(e);\n+ latch.countDown();\n+ }\n+ }\n+ });\n+ thread.setName(\"stats-\" + i);\n+ threads.add(thread);\n+ thread.start();\n+ }\n+\n+ // release the hounds\n+ barrier.await();\n+\n+ // wait for a failure, or for fifteen seconds to elapse\n+ latch.await(15, TimeUnit.SECONDS);\n+\n+ // stop all threads and wait for them to complete\n+ stop.set(true);\n+ for (final Thread thread : threads) {\n+ thread.join();\n+ }\n+\n+ assertThat(shardFailures.get(), emptyCollectionOf(ShardOperationFailedException.class));\n+ assertThat(executionFailures.get(), emptyCollectionOf(Exception.class));\n+ }\n+\n }", "filename": "core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java", "status": "modified" }, { "diff": "@@ -108,6 +108,8 @@ PUT logs_write/log/1\n \"message\": \"a dummy log\"\n }\n \n+POST logs_write/_refresh\n+\n # Wait for a day to pass\n \n POST /logs_write/_rollover <2>", "filename": "docs/reference/indices/rollover-index.asciidoc", "status": "modified" } ] }
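The `testConcurrentIndexingAndStatsRequests` javadoc above refers to the corruption that arises when a negative long hits a variable-length encoding. As a rough illustration (a generic 7-bits-per-byte sketch, not Elasticsearch's `StreamOutput#writeVLong` implementation), the width of the encoding depends on the sign bit:

```java
import java.util.Arrays;

public class VLongSketch {
    // Generic variable-length encoding: 7 payload bits per byte, high bit marks continuation.
    static byte[] writeVLong(long value) {
        byte[] buf = new byte[10];
        int i = 0;
        while ((value & ~0x7FL) != 0L) {
            buf[i++] = (byte) ((value & 0x7F) | 0x80); // low 7 bits plus continuation bit
            value >>>= 7;                              // unsigned shift, so the sign bit is treated as data
        }
        buf[i++] = (byte) value;
        return Arrays.copyOf(buf, i);
    }

    public static void main(String[] args) {
        System.out.println(writeVLong(5L).length);  // 1 byte
        System.out.println(writeVLong(-1L).length); // 10 bytes: the sign bit forces the maximum width
    }
}
```

A non-negative long never needs more than 9 such bytes, so code written under a non-negative assumption can produce or consume a payload of a different size than its peer expects once a value such as a deleted-doc count goes negative, which is the class of stream corruption the test above exercises.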
{ "body": "Right now the below aggregation is not possible even though the 'nested_agg' does return a single bucket and nested aggregation is a single bucket aggregation. \n\nBelow sample aggregation generates an error message that says 'nested_agg' is not a single bucket aggregation and can not be in the order path.\n\n```\n{\nbuckets: {\nterms: {\n field: 'docId',\n order: {'nested_agg>sum_value': 'desc'}\n},\naggs: {\n nested_agg: {\n nested: {\n path: 'my_nested_object'\n },\n aggs: {\n sum_value: {\n sum: {field: 'my_nested_object.value'}\n }\n }\n }\n }\n }\n}\n```\n", "comments": [ { "body": "@colings86 any thoughts on this?\n", "created_at": "2016-02-29T00:24:56Z" }, { "body": "I managed to reproduce this on the master branch and now know why this is happening but I don't have a solution as to how we can fix it short of just documenting that you can't order by an aggregation within a nested aggregation.\n\nThe issue is that the `NestedAggregatorFactory.createInternal()` method calls `AggregatorFactory.asMultiBucketAggregator()`. This creates a wrapper around the `NestedAggregator` that will create a separate instance of `NestedAggregator` for each parent bucket. We do this in the `NestedAggregator` to ensure the doc ids are delivered in order because with a single instance and multi-valued nested fields we could get documents not in order. Some of the aggregations rely on the fact that documents are collected in order. For example, we could collect (doc1, bucket1), (doc2, bucket1), (doc1, bucket2), (doc2, bucket2) which would be out of order, so by having separate instances we are guaranteeing docId order since each instance will only collect one bucket.\n\nI tried to change the `AggregationPath.validate()` method to use the underlying aggregator (the first instance of it at least) but then it fails later because we need to retrieve the value from the aggregator and there is no way of getting the value from a particular instance form the wrapper.\n", "created_at": "2016-02-29T13:25:24Z" }, { "body": "I managed to somehow face this issue again. The \"path\" parameter in moving average can not point to a nested aggregation because nested aggregation is not a single bucket aggregation.\n", "created_at": "2016-04-03T01:40:14Z" }, { "body": "Is there any way to get around this 'issue'?? I'm running into the same issues\n", "created_at": "2016-04-29T13:34:17Z" }, { "body": "@clintongormley @colings86 Any update on this please? We are badly stuck without this...\n", "created_at": "2016-06-28T13:54:08Z" }, { "body": "+1\n\nmy question on stackoverflow\nhttp://stackoverflow.com/questions/38089711/how-can-i-sort-aggregations-buckets-by-metric-results\n", "created_at": "2016-06-29T06:29:25Z" }, { "body": "+1 for this as well. Very big use case scenario for us.\n", "created_at": "2016-07-06T17:28:10Z" }, { "body": "+1 as this is showstoper for us to upgrade from es v1 to es v2\n", "created_at": "2016-07-12T07:57:32Z" }, { "body": "We were bitten by the same thing. FWIW, we worked around it temporarily by ordering the aggregations in the application code after they are returned from ES.\n", "created_at": "2016-07-12T08:12:59Z" }, { "body": "This missing feature is preventing us from upgrading to ES 2.X. Is there any plans to support this in the near future?\n", "created_at": "2016-08-26T13:55:36Z" }, { "body": "@clintongormley Nested architecture is an important functionality in ES. Most companies build atleast at minimum some sort of functionality with nested mappings. 
This bug renders ES useless. Any updates?\n", "created_at": "2016-08-26T14:03:59Z" }, { "body": "+1 sorting after the fact in our application isn't a viable option due to number of results. \n", "created_at": "2016-10-06T19:12:09Z" }, { "body": "I have made a fix to sort which has nested aggregations in path. Also you might have multi-value buckets in path (you should just specify bucket key in path like \"colors.red>stats.variance\").\r\nI might create a pull request or just give a link to the commit in fork of ES 5.1.2 if anyone is interested. ", "created_at": "2016-12-20T19:31:26Z" }, { "body": "That would be great, or link in your fork?\n\nOp di 20 dec. 2016 20:32 schreef idozorenko <notifications@github.com>:\n\n> I have made a fix to sort which has nested aggregations in path. Also you\n> might have multi-value buckets in path (you should just specify bucket key\n> in path like \"colors.red>stats.variance\").\n> I might create a pull request or just give a link to the commit in fork of\n> ES 5.1.2 if anyone is interested.\n>\n> —\n> You are receiving this because you are subscribed to this thread.\n> Reply to this email directly, view it on GitHub\n> <https://github.com/elastic/elasticsearch/issues/16838#issuecomment-268335539>,\n> or mute the thread\n> <https://github.com/notifications/unsubscribe-auth/AH4yOnC_e-o0zAsgVPxT6MWYR8jdUBwNks5rKC03gaJpZM4HkeIN>\n> .\n>\n", "created_at": "2016-12-20T19:34:27Z" }, { "body": ">I might create a pull request\r\n\r\n:+1:", "created_at": "2016-12-21T06:43:02Z" }, { "body": "As I'm not a contributor, I will just share my commit to ES 5.1 branch here. Please let me know if you have any questions.\r\n\r\nhttps://github.com/elastic/elasticsearch/commit/8f601a3c241cb652a889870d93fd32b3d226ef41\r\n", "created_at": "2016-12-21T12:46:34Z" }, { "body": "@idozorenko feel free to submit a PR so that we can review the code - thanks", "created_at": "2016-12-21T12:48:50Z" }, { "body": "Does this problem also occur in reverse_nested aggs? (not direct nested)", "created_at": "2017-05-06T02:04:12Z" }, { "body": "yes", "created_at": "2017-05-06T07:49:00Z" }, { "body": "+1 for this issue. We have exact same problem and same query is running with AWS ES 1.5. \r\n\r\nCan any one tell us about the status for this bug fix? This is very critical feature for us and can not move forward without this functionality? Does any one suggest to use ES 1.5 instead of 5.X version? (Personally i do not think we should do this)", "created_at": "2017-05-08T11:00:35Z" }, { "body": "We didn't want to wait for a fix or to upgrade so we ended up restructuring\nour data to be a parent/child relationship vs nested. So far so good.\n\nOn Mon, May 8, 2017 at 5:01 AM, akashmpatel91 <notifications@github.com>\nwrote:\n\n> +1 for this issue. We have exact same problem and same query is running\n> with AWS ES 1.5.\n>\n> Can any one tell us about the status for this bug fix? This is very\n> critical feature for us and can not move forward without this\n> functionality? 
Does any one suggest to use ES 1.5 instead of 5.X version?\n> (Personally i do not think we should do this)\n>\n> —\n> You are receiving this because you are subscribed to this thread.\n> Reply to this email directly, view it on GitHub\n> <https://github.com/elastic/elasticsearch/issues/16838#issuecomment-299837273>,\n> or mute the thread\n> <https://github.com/notifications/unsubscribe-auth/ABAejttN4mvEjco7X7BmwNBNVOOoopaQks5r3vX7gaJpZM4HkeIN>\n> .\n>\n", "created_at": "2017-05-08T17:45:16Z" }, { "body": "@brettahale, Please note that \"parent-child relations can make queries hundreds of times slower\" as per ES documentation https://www.elastic.co/guide/en/elasticsearch/reference/master/tune-for-search-speed.html. \r\n\r\nWe did POC and it is correct, we are handling 5-6 billions documents and query is taking 6-7 sec to return results. With nested document query is returning results in 400 ms.", "created_at": "2017-05-08T19:18:59Z" }, { "body": "Agreed, wasn't ideal but we were able to deliver our feature. I'd like to\nsee a fix here as well but after a chat with ES support, it sounded like a\nfoundational change that wasn't likely going to get fixed anytime soon.\n\nOn Mon, May 8, 2017 at 1:19 PM, akashmpatel91 <notifications@github.com>\nwrote:\n\n> @brettahale <https://github.com/brettahale>, Please note that\n> \"parent-child relations can make queries hundreds of times slower\" as per\n> ES documentation https://www.elastic.co/guide/en/elasticsearch/reference/\n> master/tune-for-search-speed.html.\n>\n> We did POC and it is correct, we are handling 5-6 billions documents and\n> query is taking 6-7 sec to return results. With nested document query is\n> returning results in 400 ms.\n>\n> —\n> You are receiving this because you were mentioned.\n> Reply to this email directly, view it on GitHub\n> <https://github.com/elastic/elasticsearch/issues/16838#issuecomment-299963988>,\n> or mute the thread\n> <https://github.com/notifications/unsubscribe-auth/ABAeju3EHsROM1hWyII36f5pPpPeVK0bks5r32rQgaJpZM4HkeIN>\n> .\n>\n", "created_at": "2017-05-08T23:46:15Z" }, { "body": "Hello team, any update on this?", "created_at": "2017-05-18T18:43:13Z" }, { "body": "Our workaround was to copy some fields from the parent docs into its nested docs, so we can still make terms aggregation on the nested docs but sort by fields \"found\" in the parent docs. This allowed us to omit the reverse_nested aggregation between the bucket and the sub bucket. Not ideal, but works in our cases. Of course, a fix would be much appreciated.\r\n\r\nEdit:\r\nApparently, there was no need for a workaround in my case.\r\nSee the comment below.", "created_at": "2017-06-12T18:12:47Z" }, { "body": "@colings86: Actually I did managed to use Terms aggregation, a sub Reverse_Nested aggregation and a sub Cardinality aggregation for ordering. 
Something like that:\r\n```\r\n{\r\n \"aggs\" : {\r\n \"AllMovieNames\" : {\r\n \"terms\" : { \"field\" : \"MovieName\" },\r\n \"order\": {\r\n \"backToActors>distinctActorsCount\":\"desc\"\r\n },\r\n \"size\": 10\r\n },\r\n\t\t\"aggs\":\r\n\t\t{\r\n\t\t\t\"backToActors\":{\t\t\r\n\t\t\t\t\"reverse_nested\":{},\r\n\t\t\t\t\"aggs\":{\r\n\t\t\t\t\t\"distinctActorsCount\":{\r\n\t\t\t\t\t\t\"cardinality\":{\r\n\t\t\t\t\t\t\t\"field\":\"ActorName\"\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\t\t\t\t}\t\t\t\r\n\t\t\t}\t\r\n\t\t}\t\t\t\r\n }\t\r\n}\r\n```\r\nNo exception message was thrown, and the order was just as expected.\r\n\r\nAre you sure the problem occur in both Nested and Reverse_Nested sub aggregation? \r\nI'm using ElasticSearch 5.3.2.", "created_at": "2017-06-13T18:34:06Z" }, { "body": "@IdanWo sorry, actually you are right, this problem doesn't occur on the `reverse_nested` aggregation, the only single bucket aggregation it should affect is the `nested` aggregation because thats the only single bucket aggregation that uses `AggregatorFactory.asMultiBucketAggregator()`", "created_at": "2017-06-20T10:58:33Z" }, { "body": "I'm another victim of this insidious bug:\r\n\r\nSituation:\r\n\r\n- Document ROOT with two nested documents NESTED1 and NESTED2.\r\n- Term aggregation over a field in ROOT.NESTED1 (nested aggregation -to NESTED1- then term aggregation)\r\n- Sum aggregation over a field in ROOT.NESTED2 (inside the previous term aggregation, reverse nested aggregation -back to ROOT-, nested aggregation -to NESTED2- then sum aggregation)\r\n\r\nI cannot use the sum aggregation to sort the term aggregation because an error is thrown saying that the nested aggregation -to NESTED2- does not returns a single-bucket aggregation\r\n\r\n**Can someone update us with the status of this bug?**\r\n\r\n(I'm using ElasticSearch 5.4)", "created_at": "2017-07-18T14:04:04Z" }, { "body": "Hello Team, any update on this Bug fix? It is not working only in 5.x version. Can you please provide ETA for this bug to be fixed? This is very important feature for term aggregation and blocking many clients.", "created_at": "2017-08-04T06:31:46Z" }, { "body": "Hi All, I was able to get around this by doing something similar to the following. For this, there was only one nested value that will match the interval condition - but you could get creative :)\r\n\r\nMapping:\r\n```\r\n \"trendingpopularityjson\": {\r\n \"type\": \"nested\",\r\n \"include_in_parent\": true,\r\n \"properties\": {\r\n \"interval\": {\r\n \"type\": \"integer\"\r\n },\r\n \"trendingpopularity\": {\r\n \"type\": \"integer\"\r\n }\r\n }\r\n }\r\n```\r\n\r\nAggregation to sum inside. This would avoid the nested aggregation - making it easy :\r\n\r\n```\r\n \"Trend\": {\r\n \"sum\": {\r\n \"script\": {\r\n \"inline\": \"def d = doc['trendingpopularityjson.interval']; for (int i = 0; i < d.length; ++i) { if (d[i] == params.interval) { return doc['trendingpopularityjson.trendingpopularity'][i] } }\",\r\n \"params\": {\r\n \"interval\": 2\r\n },\r\n \"lang\": \"painless\"\r\n }\r\n }\r\n }\r\n```", "created_at": "2017-08-04T20:49:20Z" } ], "number": 16838, "title": "Sort term aggregation with nested aggregation in order path" }
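For readers using the Java API rather than the JSON request shown in the issue description, the same aggregation roughly translates to the builder calls below. This is a sketch against the 5.x client API (`Terms.Order` was later replaced by `BucketOrder`), and on the affected versions it is rejected with the `AggregationExecutionException` discussed in the comments.

```java
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;

public class NestedOrderSketch {
    // Terms buckets over "docId", ordered by a sum computed inside a nested aggregation.
    static TermsAggregationBuilder buildAggregation() {
        TermsAggregationBuilder buckets = AggregationBuilders.terms("buckets")
            .field("docId")
            .order(Terms.Order.aggregation("nested_agg>sum_value", false)); // false = descending
        buckets.subAggregation(
            AggregationBuilders.nested("nested_agg", "my_nested_object")
                .subAggregation(AggregationBuilders.sum("sum_value").field("my_nested_object.value")));
        return buckets;
    }
}
```

The `reverse_nested` workaround shown in one of the later comments can be expressed the same way by swapping the nested builder for `AggregationBuilders.reverseNested(...)` and ordering by the metric beneath it.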
{ "body": "Fixes #16838\r\n\r\n<!--\r\nThank you for your interest in and contributing to Elasticsearch! There\r\nare a few simple things to check before submitting your pull request\r\nthat can help with the review process. You should delete these items\r\nfrom your submission, but they are here to help bring them to your\r\nattention.\r\n-->\r\n\r\n- Have you signed the [contributor license agreement](https://www.elastic.co/contributor-agreement)?\r\n- Have you followed the [contributor guidelines](https://github.com/elastic/elasticsearch/blob/master/CONTRIBUTING.md)?\r\n- If submitting code, have you built your formula locally prior to submission with `gradle check`?\r\n- If submitting code, is your pull request against master? Unless there is a good reason otherwise, we prefer pull requests against master and will backport as needed.\r\n- If submitting code, have you checked that your submission is for an [OS that we support](https://www.elastic.co/support/matrix#show_os)?\r\n", "number": 22303, "review_comments": [ { "body": "I'm not sure this will work for all cases since this is always returning the first aggregator in the list (the aggregator relating to the parent bucket which has ordinal `0`). I think by doing this it will return the wrong aggregator (and therefore give unexpected results) when the aggregator object for other parent bucket ordinals are required.", "created_at": "2017-01-04T10:48:40Z" }, { "body": "As mentioned in my main comment I would personally prefer if we avoided referencing specific Aggregation classes in the aggregator framework code", "created_at": "2017-01-04T10:49:37Z" }, { "body": "The problem here is that now this check lets through any aggregation which implements `MultiBucketAggregatorWrapper` which is more than just the `NestedAggregator` (and `TermsAggregator`). So if I now try to sort be a histogram aggregation I get a very confusing error:\r\n```\r\nInvalid order path [histo_price>avg_price]. Metrics aggregations cannot have sub-aggregations (at [histo_price>avg_price]\r\n```\r\nWhich is confusing because the histogram aggregation is not a metrics aggregation.\r\n\r\nFor the `NestedAggregator` case (and presumably `ReverseNestedAggregator` too) it might be a good idea to create a `SingleBucketAggregatorWrapper` class which does the same trick as the `MultiBucketAggregatorWrapper` but also implements `SingleBucketAggregator`. That way this check remains intact but we can support the nested aggregation in the terms ordering?", "created_at": "2017-01-04T10:56:34Z" }, { "body": "I just wonder: wouldn't it make sense to extract `first` out of `MultiBucketAggregatorWrapper` instead?", "created_at": "2017-04-13T12:08:56Z" } ], "title": "Allows to use nested aggregation and terms aggregation in sort by deep metric" }
{ "commits": [ { "message": "Allows to use nested aggregation and terms aggregation in sort by deep metric.\n\nFixes #16838" }, { "message": "Fixes ArrayOutOfBoundsException in ValueCountAggregator\n\nAddition to #17379" } ], "files": [ { "diff": "@@ -80,7 +80,10 @@ public boolean needsScores() {\n \n @Override\n public Aggregator subAggregator(String name) {\n- throw new UnsupportedOperationException();\n+ if(aggregators.get(0) != null) {\n+ return aggregators.get(0).subAggregator(name);\n+ }\n+ return null;\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java", "status": "modified" }, { "diff": "@@ -82,7 +82,10 @@ public void collect(int doc, long bucket) throws IOException {\n \n @Override\n public double metric(long owningBucketOrd) {\n- return valuesSource == null ? 0 : counts.get(owningBucketOrd);\n+ if (valuesSource == null || owningBucketOrd >= counts.size()) {\n+ return 0;\n+ }\n+ return counts.get(owningBucketOrd);\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregator.java", "status": "modified" }, { "diff": "@@ -20,12 +20,10 @@\n package org.elasticsearch.search.aggregations.support;\n \n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.search.aggregations.Aggregation;\n-import org.elasticsearch.search.aggregations.AggregationExecutionException;\n-import org.elasticsearch.search.aggregations.Aggregator;\n-import org.elasticsearch.search.aggregations.HasAggregations;\n+import org.elasticsearch.search.aggregations.*;\n import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregation;\n import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator;\n+import org.elasticsearch.search.aggregations.bucket.terms.*;\n import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation;\n import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator;\n \n@@ -71,43 +69,27 @@ public static AggregationPath parse(String path) {\n String[] tuple = new String[2];\n for (int i = 0; i < elements.length; i++) {\n String element = elements[i];\n- if (i == elements.length - 1) {\n- int index = element.lastIndexOf('[');\n- if (index >= 0) {\n- if (index == 0 || index > element.length() - 3) {\n- throw new AggregationExecutionException(\"Invalid path element [\" + element + \"] in path [\" + path + \"]\");\n- }\n- if (element.charAt(element.length() - 1) != ']') {\n- throw new AggregationExecutionException(\"Invalid path element [\" + element + \"] in path [\" + path + \"]\");\n- }\n- tokens.add(new PathElement(element, element.substring(0, index), element.substring(index + 1, element.length() - 1)));\n- continue;\n- }\n- index = element.lastIndexOf('.');\n- if (index < 0) {\n- tokens.add(new PathElement(element, element, null));\n- continue;\n- }\n- if (index == 0 || index > element.length() - 2) {\n+ int index = element.lastIndexOf('[');\n+ if (index >= 0) {\n+ if (index == 0 || index > element.length() - 3) {\n throw new AggregationExecutionException(\"Invalid path element [\" + element + \"] in path [\" + path + \"]\");\n }\n- tuple = split(element, index, tuple);\n- tokens.add(new PathElement(element, tuple[0], tuple[1]));\n-\n- } else {\n- int index = element.lastIndexOf('[');\n- if (index >= 0) {\n- if (index == 0 || index > element.length() - 3) {\n- throw new AggregationExecutionException(\"Invalid path element [\" + element + \"] in path [\" + path + \"]\");\n- }\n- if 
(element.charAt(element.length() - 1) != ']') {\n- throw new AggregationExecutionException(\"Invalid path element [\" + element + \"] in path [\" + path + \"]\");\n- }\n- tokens.add(new PathElement(element, element.substring(0, index), element.substring(index + 1, element.length() - 1)));\n- continue;\n+ if (element.charAt(element.length() - 1) != ']') {\n+ throw new AggregationExecutionException(\"Invalid path element [\" + element + \"] in path [\" + path + \"]\");\n }\n+ tokens.add(new PathElement(element, element.substring(0, index), element.substring(index + 1, element.length() - 1)));\n+ continue;\n+ }\n+ index = element.lastIndexOf('.');\n+ if (index < 0) {\n tokens.add(new PathElement(element, element, null));\n+ continue;\n }\n+ if (index == 0 || index > element.length() - 2) {\n+ throw new AggregationExecutionException(\"Invalid path element [\" + element + \"] in path [\" + path + \"]\");\n+ }\n+ tuple = split(element, index, tuple);\n+ tokens.add(new PathElement(element, tuple[0], tuple[1]));\n }\n return new AggregationPath(tokens);\n }\n@@ -217,10 +199,23 @@ public double resolveValue(HasAggregations root) {\n continue;\n }\n \n- // the agg can only be a metrics agg, and a metrics agg must be at the end of the path\n if (i != pathElements.size() - 1) {\n- throw new IllegalArgumentException(\"Invalid order path [\" + this +\n- \"]. Metrics aggregations cannot have sub-aggregations (at [\" + token + \">\" + pathElements.get(i + 1) + \"]\");\n+ // try get bucket by key name\n+ if(agg instanceof InternalMappedTerms){\n+ InternalTerms.Bucket bucket = ((InternalMappedTerms) agg).getBucketByKey(token.key);\n+ if(bucket != null) {\n+ parent = bucket;\n+ value = bucket.getDocCount();\n+ continue;\n+ } else {\n+ value = Double.NaN;\n+ break;\n+ }\n+ } else {\n+ // the agg can only be a metrics agg, and a metrics agg must be at the end of the path\n+ throw new IllegalArgumentException(\"Invalid order path [\" + this +\n+ \"]. Metrics aggregations cannot have sub-aggregations (at [\" + token + \">\" + pathElements.get(i + 1) + \"]\");\n+ }\n }\n \n if (agg instanceof InternalNumericMetricsAggregation.SingleValue) {\n@@ -295,21 +290,13 @@ public void validate(Aggregator root) {\n \n // we're in the middle of the path, so the aggregator can only be a single-bucket aggregator\n \n- if (!(aggregator instanceof SingleBucketAggregator)) {\n+ if (!(aggregator instanceof SingleBucketAggregator) && !(aggregator instanceof AggregatorFactory.MultiBucketAggregatorWrapper)) {\n throw new AggregationExecutionException(\"Invalid terms aggregation order path [\" + this +\n \"]. Terms buckets can only be sorted on a sub-aggregator path \" +\n \"that is built out of zero or more single-bucket aggregations within the path and a final \" +\n \"single-bucket or a metrics aggregation at the path end. Sub-path [\" +\n subPath(0, i + 1) + \"] points to non single-bucket aggregation\");\n }\n-\n- if (pathElements.get(i).key != null) {\n- throw new AggregationExecutionException(\"Invalid terms aggregation order path [\" + this +\n- \"]. Terms buckets can only be sorted on a sub-aggregator path \" +\n- \"that is built out of zero or more single-bucket aggregations within the path and a \" +\n- \"final single-bucket or a metrics aggregation at the path end. 
Sub-path [\" +\n- subPath(0, i + 1) + \"] points to non single-bucket aggregation\");\n- }\n }\n }\n boolean singleBucket = aggregator instanceof SingleBucketAggregator;", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java", "status": "modified" } ] }
{ "body": "Currently `UnicastHostsProvider#buildDynamicNodes` is called multiple times during the pinging rounds. Once on start, one more after 1.5 seconds (by default) and last time at the end (3s by default). My guess is that this done in an attempt to get the latest information possible when connect to EC2 / GCE/ Azure's API. This does have a nasty side effect on the FileBasedUnicastHostsProvider, which currently reads it's file on ever call. The file reading is not a problem on it's own as it is just as fast as an EC2 call. The problem part is that each read generates nodes with new node ids, meaning that UnicastZenPing will setup new connections with each of these rounds. \r\n\r\nI see two relatively easy ways of solving it:\r\n1) Call `buildDynamicNodes` once at the beginning of each round (might be the simplest).\r\n2) Add an \"onStartPing\" method to `UnicastHostsProvider` which is guaranteed to be called at the beginning of each pinging round. `FileBasedDiscovery` can read the file contents in that method.", "comments": [ { "body": "+1 to option number 1", "created_at": "2016-11-22T20:04:29Z" }, { "body": "I marked this as an adopt me. It's a fairly easy entry into the discovery universe. If anyone want's to pick it up, ping me and I'll help get going.", "created_at": "2016-11-24T17:25:47Z" }, { "body": "@bleskes I want's to pick this up.", "created_at": "2016-12-05T11:17:52Z" }, { "body": "@makeyang good to hear. @chengpohi already made an attempt. Not sure if he (judging by avatar) plans to continue with it.... ", "created_at": "2016-12-07T18:14:38Z" } ], "number": 21739, "title": "FileBasedDiscovery causes connections on every ping round" }
{ "body": "The `UnicastZenPing` shows it's age and is the result of many small changes. The current state of affairs is confusing and is hard to reason about. This PR cleans it up (while following the same original intentions). Highlights of the changes are:\r\n\r\n1) Clear 3 round flow - no interleaving of scheduling.\r\n2) The previous implementation did a best effort attempt to wait for ongoing pings to be sent and completed. The pings were guaranteed to complete because each used the total ping duration as a timeout. This did make it hard to reason about the total ping duration and the flow of the code. All of this is removed now and ping should just complete within the given duration or not be counted (note that it was very handy for testing, but I move the needed sync logic to the test).\r\n3) Because of (2) the pinging scheduling changed a bit, to give a chance for the last round to complete. We now ping at the beginning, 1/3 and 2/3 of the duration.\r\n4) To offset for (3) a bit, incoming ping requests are now added to on going ping collections.\r\n5) UnicastZenPing never establishes full blown connections (but does reuse them if there). Relates to #22120\r\n6) Discovery host providers are only used once per pinging round. Closes #21739\r\n7) Usage of the ability to open a connection without connecting to a node ( #22194 ) and shorter connection timeouts helps with connections piling up. Closes #19370\r\n8) Beefed up testing and sped them up.\r\n9) removed light profile from production code ", "number": 22277, "review_comments": [ { "body": "💥 ", "created_at": "2016-12-20T12:11:49Z" }, { "body": "please restore the interrupt status here?", "created_at": "2016-12-20T12:14:46Z" }, { "body": "maybe `(e instanceof AlreadyClosedException) == false`? ", "created_at": "2016-12-20T12:15:43Z" }, { "body": "we call it `ensureOpen` everywhere can we do the same here?", "created_at": "2016-12-20T12:17:30Z" }, { "body": "hmm that looks weird. Can we maybe use a `KeyedLock` when we open the connections with IP and port or something like this?", "created_at": "2016-12-20T12:19:06Z" }, { "body": "use `localNode.equals(incomingObject)` we know that `localnode` is non-null", "created_at": "2016-12-20T12:19:42Z" }, { "body": "if you wanna be more explicit you can use `Function::identity` to make sure it's not a typo", "created_at": "2016-12-20T12:27:05Z" }, { "body": "maybe log this?", "created_at": "2016-12-20T12:28:40Z" }, { "body": "do we need this trace log here and if so can we fix it to say `temporarily` or something like this", "created_at": "2016-12-20T12:30:59Z" }, { "body": "just flip it then you don't need to negate", "created_at": "2016-12-20T12:39:01Z" }, { "body": "if you keep the trace maybe use a logging guard here?", "created_at": "2016-12-20T12:39:18Z" }, { "body": "I wonder if we should do this. I think we should move the `LIGHT_PROFILE` into tests somewhere and then require every special use to build it's own. The problem I have here is that the `getLightProfileWithTimeout` shares one connection across all uses. I think in the case of ping we should only use 1 connection for `PING` and 0 for the others. that will cause an exception if it's used in a wrong context. makes sense?", "created_at": "2016-12-20T12:44:44Z" }, { "body": "++", "created_at": "2016-12-20T12:44:50Z" }, { "body": "this is how it was and we do throw an exception, thus processing the interrupt?", "created_at": "2016-12-20T15:15:23Z" }, { "body": "yep. 
This morphed - I used to have a log there but got annoyed with it (just noise). ", "created_at": "2016-12-20T15:16:08Z" }, { "body": "good one. Will change.", "created_at": "2016-12-20T15:17:40Z" }, { "body": "yeah, I wanted to have the simplest construct as it was a rare collision. With the latest code I actually think it's impossible (I dedup on addresses and the connection are private to the pinging round). Will remove.", "created_at": "2016-12-20T15:19:15Z" }, { "body": "+++ (although it's not optional on the wire)", "created_at": "2016-12-20T15:19:52Z" }, { "body": "the compiler can't figure out the implied types with the identity. I changed it to be more verbose so it won't look like a typo", "created_at": "2016-12-20T15:26:06Z" }, { "body": "sure", "created_at": "2016-12-20T15:27:15Z" }, { "body": "I adapted the log message", "created_at": "2016-12-20T15:27:59Z" }, { "body": "sure thing, will add.", "created_at": "2016-12-20T15:31:56Z" }, { "body": "sure", "created_at": "2016-12-20T16:36:36Z" }, { "body": "I tried to implement your suggestion and I think it looks good. will push shortly.", "created_at": "2016-12-20T19:43:24Z" }, { "body": "turns out we do need this protection or something similar. I took another approach, which I think you'd like better.", "created_at": "2016-12-20T19:44:56Z" }, { "body": "This logging was initially added to just `testSimplePings` to chase a race. The race has not reproduced since adding this logging. I think that we should drop the logging and and then address if the race comes back since you've changed how these things are handled.", "created_at": "2016-12-20T20:37:17Z" }, { "body": "removed. ", "created_at": "2016-12-20T20:47:55Z" }, { "body": "maybe use `computeIfAbsent()`?", "created_at": "2016-12-21T07:00:52Z" }, { "body": "you didn't like `Function.identity()` ?", "created_at": "2016-12-21T07:01:58Z" }, { "body": "`Arrays.asStream(response.pingResponses)` would not materialize it", "created_at": "2016-12-21T07:02:40Z" } ], "title": "Simplify Unicast Zen Ping" }
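To make point (3) above concrete, here is a simplified, self-contained sketch of the new scheduling: ping batches go out at the start, at 1/3, and at 2/3 of the round duration, and the round is closed at the full duration. It uses a plain `ScheduledExecutorService` instead of Elasticsearch's `ThreadPool`, so it only approximates the flow implemented in `UnicastZenPing` in the diff below.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public final class PingScheduleSketch {
    public static void main(String[] args) throws InterruptedException {
        final long durationMillis = 3_000L; // total round duration, e.g. the 3s default
        final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

        final Runnable pingSender = () -> System.out.println("sending a batch of pings");
        final Runnable finishRound = () -> System.out.println("closing the round and reporting collected pings");

        // three batches: immediately, at 1/3 and at 2/3 of the duration ...
        scheduler.execute(pingSender);
        scheduler.schedule(pingSender, durationMillis / 3, TimeUnit.MILLISECONDS);
        scheduler.schedule(pingSender, durationMillis / 3 * 2, TimeUnit.MILLISECONDS);
        // ... and the round is closed at the full duration, giving the last batch time to answer
        scheduler.schedule(finishRound, durationMillis, TimeUnit.MILLISECONDS);

        Thread.sleep(durationMillis + 100);
        scheduler.shutdown();
    }
}
```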
{ "commits": [ { "message": "initial implementation" }, { "message": "speed up pinging tests" }, { "message": "linting" }, { "message": "fix FileBasedUnicastHostsProviderTests" }, { "message": "Merge remote-tracking branch 'upstream/master' into unicast_zen_cleanup" }, { "message": "fix racing conditions in waiting for completeness" }, { "message": "better dedupping en using non-async and exact counters" }, { "message": "add a test for remembering incoming pings" }, { "message": "use light connections with the right timeout" }, { "message": "Merge remote-tracking branch 'upstream/master' into unicast_zen_cleanup" }, { "message": "feedback" }, { "message": "feedback" }, { "message": "Merge remote-tracking branch 'upstream/master' into unicast_zen_cleanup" }, { "message": "remove LIGHT_PROFILE in favor of dedicated single channel profiles" }, { "message": "TransportClientNodesService is not ready yet for a single channel type" }, { "message": "remove trace logging" }, { "message": "feedback" } ], "files": [ { "diff": "@@ -101,6 +101,21 @@ final class TransportClientNodesService extends AbstractComponent implements Clo\n \n private final TransportClient.HostFailureListener hostFailureListener;\n \n+ // TODO: migrate this to use low level connections and single type channels\n+ /** {@link ConnectionProfile} to use when to connecting to the listed nodes and doing a liveness check */\n+ private static final ConnectionProfile LISTED_NODES_PROFILE;\n+\n+ static {\n+ ConnectionProfile.Builder builder = new ConnectionProfile.Builder();\n+ builder.addConnections(1,\n+ TransportRequestOptions.Type.BULK,\n+ TransportRequestOptions.Type.PING,\n+ TransportRequestOptions.Type.RECOVERY,\n+ TransportRequestOptions.Type.REG,\n+ TransportRequestOptions.Type.STATE);\n+ LISTED_NODES_PROFILE = builder.build();\n+ }\n+\n TransportClientNodesService(Settings settings, TransportService transportService,\n ThreadPool threadPool, TransportClient.HostFailureListener hostFailureListener) {\n super(settings);\n@@ -389,8 +404,8 @@ protected void doSample() {\n if (!transportService.nodeConnected(listedNode)) {\n try {\n // its a listed node, light connect to it...\n- logger.trace(\"connecting to listed node (light) [{}]\", listedNode);\n- transportService.connectToNode(listedNode, ConnectionProfile.LIGHT_PROFILE);\n+ logger.trace(\"connecting to listed node [{}]\", listedNode);\n+ transportService.connectToNode(listedNode, LISTED_NODES_PROFILE);\n } catch (Exception e) {\n logger.info(\n (Supplier<?>)\n@@ -470,7 +485,7 @@ public void run() {\n } else {\n // its a listed node, light connect to it...\n logger.trace(\"connecting to listed node (light) [{}]\", listedNode);\n- transportService.connectToNode(listedNode, ConnectionProfile.LIGHT_PROFILE);\n+ transportService.connectToNode(listedNode, LISTED_NODES_PROFILE);\n }\n } catch (Exception e) {\n logger.debug(", "filename": "core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java", "status": "modified" }, { "diff": "@@ -24,7 +24,6 @@\n import org.elasticsearch.cluster.ClusterState;\n import org.elasticsearch.cluster.node.DiscoveryNode;\n import org.elasticsearch.common.component.AbstractComponent;\n-import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Setting;\n import org.elasticsearch.common.settings.Setting.Property;\n import org.elasticsearch.common.settings.Settings;\n@@ -174,7 +173,7 @@ public void logMinimumMasterNodesWarningIfNecessary(ClusterState oldState, Clust\n * Returns the given nodes 
sorted by likelihood of being elected as master, most likely first.\n * Non-master nodes are not removed but are rather put in the end\n */\n- public static List<DiscoveryNode> sortByMasterLikelihood(Iterable<DiscoveryNode> nodes) {\n+ static List<DiscoveryNode> sortByMasterLikelihood(Iterable<DiscoveryNode> nodes) {\n ArrayList<DiscoveryNode> sortedNodes = CollectionUtils.iterableAsArrayList(nodes);\n CollectionUtil.introSort(sortedNodes, ElectMasterService::compareNodes);\n return sortedNodes;", "filename": "core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java", "status": "modified" }, { "diff": "@@ -23,13 +23,12 @@\n import org.apache.logging.log4j.Logger;\n import org.apache.logging.log4j.message.ParameterizedMessage;\n import org.apache.logging.log4j.util.Supplier;\n-import org.elasticsearch.ElasticsearchException;\n+import org.apache.lucene.store.AlreadyClosedException;\n+import org.apache.lucene.util.IOUtils;\n import org.elasticsearch.Version;\n import org.elasticsearch.cluster.ClusterName;\n import org.elasticsearch.cluster.node.DiscoveryNode;\n import org.elasticsearch.cluster.node.DiscoveryNodes;\n-import org.elasticsearch.common.Nullable;\n-import org.elasticsearch.common.UUIDs;\n import org.elasticsearch.common.component.AbstractComponent;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n@@ -44,10 +43,14 @@\n import org.elasticsearch.common.util.concurrent.AbstractRunnable;\n import org.elasticsearch.common.util.concurrent.ConcurrentCollections;\n import org.elasticsearch.common.util.concurrent.EsExecutors;\n-import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;\n+import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor;\n+import org.elasticsearch.common.util.concurrent.KeyedLock;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.ConnectTransportException;\n+import org.elasticsearch.transport.ConnectionProfile;\n+import org.elasticsearch.transport.NodeNotConnectedException;\n import org.elasticsearch.transport.RemoteTransportException;\n+import org.elasticsearch.transport.Transport.Connection;\n import org.elasticsearch.transport.TransportChannel;\n import org.elasticsearch.transport.TransportException;\n import org.elasticsearch.transport.TransportRequest;\n@@ -60,8 +63,8 @@\n import java.io.IOException;\n import java.util.ArrayList;\n import java.util.Arrays;\n-import java.util.Collection;\n-import java.util.HashSet;\n+import java.util.Collections;\n+import java.util.HashMap;\n import java.util.Iterator;\n import java.util.List;\n import java.util.Locale;\n@@ -70,18 +73,17 @@\n import java.util.Queue;\n import java.util.Set;\n import java.util.concurrent.Callable;\n-import java.util.concurrent.CountDownLatch;\n import java.util.concurrent.ExecutionException;\n import java.util.concurrent.ExecutorService;\n import java.util.concurrent.Future;\n-import java.util.concurrent.RejectedExecutionException;\n import java.util.concurrent.ThreadFactory;\n import java.util.concurrent.TimeUnit;\n import java.util.concurrent.atomic.AtomicBoolean;\n import java.util.concurrent.atomic.AtomicInteger;\n-import java.util.concurrent.atomic.AtomicReference;\n+import java.util.function.Consumer;\n import java.util.function.Function;\n import java.util.stream.Collectors;\n+import java.util.stream.Stream;\n \n import static java.util.Collections.emptyList;\n import static java.util.Collections.emptyMap;\n@@ -116,22 +118,19 @@ public class 
UnicastZenPing extends AbstractComponent implements ZenPing {\n \n private volatile PingContextProvider contextProvider;\n \n- private final AtomicInteger pingHandlerIdGenerator = new AtomicInteger();\n+ private final AtomicInteger pingingRoundIdGenerator = new AtomicInteger();\n \n- // used to generate unique ids for nodes/address we temporarily connect to\n- private final AtomicInteger unicastNodeIdGenerator = new AtomicInteger();\n-\n- // used as a node id prefix for nodes/address we temporarily connect to\n+ // used as a node id prefix for configured unicast host nodes/address\n private static final String UNICAST_NODE_PREFIX = \"#zen_unicast_\";\n \n- private final Map<Integer, SendPingsHandler> receivedResponses = newConcurrentMap();\n+ private final Map<Integer, PingingRound> activePingingRounds = newConcurrentMap();\n \n // a list of temporal responses a node will return for a request (holds responses from other nodes)\n private final Queue<PingResponse> temporalResponses = ConcurrentCollections.newQueue();\n \n private final UnicastHostsProvider hostsProvider;\n \n- private final ExecutorService unicastZenPingExecutorService;\n+ protected final EsThreadPoolExecutor unicastZenPingExecutorService;\n \n private final TimeValue resolveTimeout;\n \n@@ -146,15 +145,14 @@ public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService\n this.hostsProvider = unicastHostsProvider;\n \n this.concurrentConnects = DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings);\n- final List<String> hosts = DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.get(settings);\n- if (hosts.isEmpty()) {\n+ if (DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.exists(settings)) {\n+ configuredHosts = DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.get(settings);\n+ // we only limit to 1 addresses, makes no sense to ping 100 ports\n+ limitPortCounts = LIMIT_FOREIGN_PORTS_COUNT;\n+ } else {\n // if unicast hosts are not specified, fill with simple defaults on the local machine\n configuredHosts = transportService.getLocalAddresses();\n limitPortCounts = LIMIT_LOCAL_PORTS_COUNT;\n- } else {\n- configuredHosts = hosts;\n- // we only limit to 1 addresses, makes no sense to ping 100 ports\n- limitPortCounts = LIMIT_FOREIGN_PORTS_COUNT;\n }\n resolveTimeout = DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT.get(settings);\n logger.debug(\n@@ -164,7 +162,7 @@ public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService\n resolveTimeout);\n \n transportService.registerRequestHandler(ACTION_NAME, UnicastPingRequest::new, ThreadPool.Names.SAME,\n- new UnicastPingRequestHandler());\n+ new UnicastPingRequestHandler());\n \n final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings, \"[unicast_connect]\");\n unicastZenPingExecutorService = EsExecutors.newScaling(\n@@ -186,23 +184,23 @@ public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService\n * @param hosts the hosts to resolve\n * @param limitPortCounts the number of ports to resolve (should be 1 for non-local transport)\n * @param transportService the transport service\n- * @param idGenerator the generator to supply unique ids for each discovery node\n+ * @param nodeId_prefix a prefix to use for node ids\n * @param resolveTimeout the timeout before returning from hostname lookups\n * @return a list of discovery nodes with resolved transport addresses\n */\n- public static List<DiscoveryNode> resolveDiscoveryNodes(\n+ public static List<DiscoveryNode> resolveHostsLists(\n final ExecutorService 
executorService,\n final Logger logger,\n final List<String> hosts,\n final int limitPortCounts,\n final TransportService transportService,\n- final Supplier<String> idGenerator,\n+ final String nodeId_prefix,\n final TimeValue resolveTimeout) throws InterruptedException {\n Objects.requireNonNull(executorService);\n Objects.requireNonNull(logger);\n Objects.requireNonNull(hosts);\n Objects.requireNonNull(transportService);\n- Objects.requireNonNull(idGenerator);\n+ Objects.requireNonNull(nodeId_prefix);\n Objects.requireNonNull(resolveTimeout);\n if (resolveTimeout.nanos() < 0) {\n throw new IllegalArgumentException(\"resolve timeout must be non-negative but was [\" + resolveTimeout + \"]\");\n@@ -211,7 +209,7 @@ public static List<DiscoveryNode> resolveDiscoveryNodes(\n final List<Callable<TransportAddress[]>> callables =\n hosts\n .stream()\n- .map(hn -> (Callable<TransportAddress[]>)() -> transportService.addressesFromString(hn, limitPortCounts))\n+ .map(hn -> (Callable<TransportAddress[]>) () -> transportService.addressesFromString(hn, limitPortCounts))\n .collect(Collectors.toList());\n final List<Future<TransportAddress[]>> futures =\n executorService.invokeAll(callables, resolveTimeout.nanos(), TimeUnit.NANOSECONDS);\n@@ -226,11 +224,11 @@ public static List<DiscoveryNode> resolveDiscoveryNodes(\n try {\n final TransportAddress[] addresses = future.get();\n logger.trace(\"resolved host [{}] to {}\", hostname, addresses);\n- for (final TransportAddress address : addresses) {\n+ for (int addressId = 0; addressId < addresses.length; addressId++) {\n discoveryNodes.add(\n new DiscoveryNode(\n- idGenerator.get(),\n- address,\n+ nodeId_prefix + hostname + \"_\" + addressId + \"#\",\n+ addresses[addressId],\n emptyMap(),\n emptySet(),\n Version.CURRENT.minimumCompatibilityVersion()));\n@@ -249,8 +247,8 @@ public static List<DiscoveryNode> resolveDiscoveryNodes(\n \n @Override\n public void close() {\n- ThreadPool.terminate(unicastZenPingExecutorService, 0, TimeUnit.SECONDS);\n- Releasables.close(receivedResponses.values());\n+ ThreadPool.terminate(unicastZenPingExecutorService, 10, TimeUnit.SECONDS);\n+ Releasables.close(activePingingRounds.values());\n closed = true;\n }\n \n@@ -266,106 +264,106 @@ public void clearTemporalResponses() {\n temporalResponses.clear();\n }\n \n- // test only\n- Collection<PingResponse> pingAndWait(TimeValue duration) {\n- final AtomicReference<Collection<PingResponse>> response = new AtomicReference<>();\n- final CountDownLatch latch = new CountDownLatch(1);\n- ping(pings -> {\n- response.set(pings);\n- latch.countDown();\n- }, duration);\n- try {\n- latch.await();\n- return response.get();\n- } catch (InterruptedException e) {\n- return null;\n- }\n- }\n-\n /**\n- * Sends three rounds of pings notifying the specified {@link PingListener} when pinging is complete. Pings are sent after resolving\n+ * Sends three rounds of pings notifying the specified {@link Consumer} when pinging is complete. Pings are sent after resolving\n * configured unicast hosts to their IP address (subject to DNS caching within the JVM). A batch of pings is sent, then another batch\n * of pings is sent at half the specified {@link TimeValue}, and then another batch of pings is sent at the specified {@link TimeValue}.\n * The pings that are sent carry a timeout of 1.25 times the specified {@link TimeValue}. 
When pinging each node, a connection and\n * handshake is performed, with a connection timeout of the specified {@link TimeValue}.\n *\n- * @param listener the callback when pinging is complete\n- * @param duration the timeout for various components of the pings\n+ * @param resultsConsumer the callback when pinging is complete\n+ * @param duration the timeout for various components of the pings\n */\n @Override\n- public void ping(final PingListener listener, final TimeValue duration) {\n- final List<DiscoveryNode> resolvedDiscoveryNodes;\n+ public void ping(final Consumer<PingCollection> resultsConsumer, final TimeValue duration) {\n+ ping(resultsConsumer, duration, duration);\n+ }\n+\n+ /**\n+ * a variant of {@link #ping(Consumer, TimeValue)}, but allows separating the scheduling duration\n+ * from the duration used for request level time outs. This is useful for testing\n+ */\n+ protected void ping(final Consumer<PingCollection> resultsConsumer,\n+ final TimeValue scheduleDuration,\n+ final TimeValue requestDuration) {\n+ final List<DiscoveryNode> seedNodes;\n try {\n- resolvedDiscoveryNodes = resolveDiscoveryNodes(\n+ seedNodes = resolveHostsLists(\n unicastZenPingExecutorService,\n logger,\n configuredHosts,\n limitPortCounts,\n transportService,\n- () -> UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + \"#\",\n+ UNICAST_NODE_PREFIX,\n resolveTimeout);\n } catch (InterruptedException e) {\n throw new RuntimeException(e);\n }\n- final SendPingsHandler sendPingsHandler = new SendPingsHandler(pingHandlerIdGenerator.incrementAndGet());\n- try {\n- receivedResponses.put(sendPingsHandler.id(), sendPingsHandler);\n- try {\n- sendPings(duration, null, sendPingsHandler, resolvedDiscoveryNodes);\n- } catch (RejectedExecutionException e) {\n- logger.debug(\"Ping execution rejected\", e);\n- // The RejectedExecutionException can come from the fact unicastZenPingExecutorService is at its max down in sendPings\n- // But don't bail here, we can retry later on after the send ping has been scheduled.\n+ seedNodes.addAll(hostsProvider.buildDynamicNodes());\n+ final DiscoveryNodes nodes = contextProvider.nodes();\n+ // add all possible master nodes that were active in the last known cluster configuration\n+ for (ObjectCursor<DiscoveryNode> masterNode : nodes.getMasterNodes().values()) {\n+ seedNodes.add(masterNode.value);\n+ }\n+\n+ final ConnectionProfile connectionProfile =\n+ ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, requestDuration, requestDuration);\n+ final PingingRound pingingRound = new PingingRound(pingingRoundIdGenerator.incrementAndGet(), seedNodes, resultsConsumer,\n+ nodes.getLocalNode(), connectionProfile);\n+ activePingingRounds.put(pingingRound.id(), pingingRound);\n+ final AbstractRunnable pingSender = new AbstractRunnable() {\n+ @Override\n+ public void onFailure(Exception e) {\n+ if (e instanceof AlreadyClosedException == false) {\n+ logger.warn(\"unexpected error while pinging\", e);\n+ }\n }\n \n- threadPool.schedule(TimeValue.timeValueMillis(duration.millis() / 2), ThreadPool.Names.GENERIC, new AbstractRunnable() {\n- @Override\n- protected void doRun() {\n- sendPings(duration, null, sendPingsHandler, resolvedDiscoveryNodes);\n- threadPool.schedule(TimeValue.timeValueMillis(duration.millis() / 2), ThreadPool.Names.GENERIC, new AbstractRunnable() {\n- @Override\n- protected void doRun() throws Exception {\n- sendPings(duration, TimeValue.timeValueMillis(duration.millis() / 2), sendPingsHandler, resolvedDiscoveryNodes);\n- 
sendPingsHandler.close();\n- listener.onPing(sendPingsHandler.pingCollection().toList());\n- for (DiscoveryNode node : sendPingsHandler.nodeToDisconnect) {\n- logger.trace(\"[{}] disconnecting from {}\", sendPingsHandler.id(), node);\n- transportService.disconnectFromNode(node);\n- }\n- }\n+ @Override\n+ protected void doRun() throws Exception {\n+ sendPings(requestDuration, pingingRound);\n+ }\n+ };\n+ threadPool.generic().execute(pingSender);\n+ threadPool.schedule(TimeValue.timeValueMillis(scheduleDuration.millis() / 3), ThreadPool.Names.GENERIC, pingSender);\n+ threadPool.schedule(TimeValue.timeValueMillis(scheduleDuration.millis() / 3 * 2), ThreadPool.Names.GENERIC, pingSender);\n+ threadPool.schedule(scheduleDuration, ThreadPool.Names.GENERIC, new AbstractRunnable() {\n+ @Override\n+ protected void doRun() throws Exception {\n+ finishPingingRound(pingingRound);\n+ }\n \n- @Override\n- public void onFailure(Exception e) {\n- logger.debug(\"Ping execution failed\", e);\n- sendPingsHandler.close();\n- }\n- });\n- }\n+ @Override\n+ public void onFailure(Exception e) {\n+ logger.warn(\"unexpected error while finishing pinging round\", e);\n+ }\n+ });\n+ }\n \n- @Override\n- public void onFailure(Exception e) {\n- logger.debug(\"Ping execution failed\", e);\n- sendPingsHandler.close();\n- }\n- });\n- } catch (EsRejectedExecutionException ex) { // TODO: remove this once ScheduledExecutor has support for AbstractRunnable\n- sendPingsHandler.close();\n- // we are shutting down\n- } catch (Exception e) {\n- sendPingsHandler.close();\n- throw new ElasticsearchException(\"Ping execution failed\", e);\n- }\n+ // for testing\n+ protected void finishPingingRound(PingingRound pingingRound) {\n+ pingingRound.close();\n }\n \n- class SendPingsHandler implements Releasable {\n+ protected class PingingRound implements Releasable {\n private final int id;\n- private final Set<DiscoveryNode> nodeToDisconnect = ConcurrentCollections.newConcurrentSet();\n+ private final Map<TransportAddress, Connection> tempConnections = new HashMap<>();\n+ private final KeyedLock<TransportAddress> connectionLock = new KeyedLock<>(true);\n private final PingCollection pingCollection;\n+ private final List<DiscoveryNode> seedNodes;\n+ private final Consumer<PingCollection> pingListener;\n+ private final DiscoveryNode localNode;\n+ private final ConnectionProfile connectionProfile;\n \n private AtomicBoolean closed = new AtomicBoolean(false);\n \n- SendPingsHandler(int id) {\n+ PingingRound(int id, List<DiscoveryNode> seedNodes, Consumer<PingCollection> resultsConsumer, DiscoveryNode localNode,\n+ ConnectionProfile connectionProfile) {\n this.id = id;\n+ this.seedNodes = Collections.unmodifiableList(new ArrayList<>(seedNodes));\n+ this.pingListener = resultsConsumer;\n+ this.localNode = localNode;\n+ this.connectionProfile = connectionProfile;\n this.pingCollection = new PingCollection();\n }\n \n@@ -377,154 +375,170 @@ public boolean isClosed() {\n return this.closed.get();\n }\n \n- public PingCollection pingCollection() {\n- return pingCollection;\n+ public List<DiscoveryNode> getSeedNodes() {\n+ ensureOpen();\n+ return seedNodes;\n+ }\n+\n+ public Connection getOrConnect(DiscoveryNode node) throws IOException {\n+ Connection result;\n+ try (Releasable ignore = connectionLock.acquire(node.getAddress())) {\n+ result = tempConnections.get(node.getAddress());\n+ if (result == null) {\n+ boolean success = false;\n+ result = transportService.openConnection(node, connectionProfile);\n+ try {\n+ transportService.handshake(result, 
connectionProfile.getHandshakeTimeout().millis());\n+ synchronized (this) {\n+ // acquire lock to prevent concurrent closing\n+ Connection existing = tempConnections.put(node.getAddress(), result);\n+ assert existing == null;\n+ success = true;\n+ }\n+ } finally {\n+ if (success == false) {\n+ IOUtils.closeWhileHandlingException(result);\n+ }\n+ }\n+ }\n+ }\n+ return result;\n+ }\n+\n+ private void ensureOpen() {\n+ if (isClosed()) {\n+ throw new AlreadyClosedException(\"pinging round [\" + id + \"] is finished\");\n+ }\n+ }\n+\n+ public void addPingResponseToCollection(PingResponse pingResponse) {\n+ if (localNode.equals(pingResponse.node()) == false) {\n+ pingCollection.addPing(pingResponse);\n+ }\n }\n \n @Override\n public void close() {\n- if (closed.compareAndSet(false, true)) {\n- receivedResponses.remove(id);\n+ List<Connection> toClose = null;\n+ synchronized (this) {\n+ if (closed.compareAndSet(false, true)) {\n+ activePingingRounds.remove(id);\n+ toClose = new ArrayList<>(tempConnections.values());\n+ tempConnections.clear();\n+ }\n }\n+ if (toClose != null) {\n+ // we actually closed\n+ try {\n+ pingListener.accept(pingCollection);\n+ } finally {\n+ IOUtils.closeWhileHandlingException(toClose);\n+ }\n+ }\n+ }\n+\n+ public ConnectionProfile getConnectionProfile() {\n+ return connectionProfile;\n }\n }\n \n \n- void sendPings(\n- final TimeValue timeout,\n- @Nullable TimeValue waitTime,\n- final SendPingsHandler sendPingsHandler,\n- final List<DiscoveryNode> resolvedDiscoveryNodes) {\n+ protected void sendPings(final TimeValue timeout, final PingingRound pingingRound) {\n final UnicastPingRequest pingRequest = new UnicastPingRequest();\n- pingRequest.id = sendPingsHandler.id();\n+ pingRequest.id = pingingRound.id();\n pingRequest.timeout = timeout;\n DiscoveryNodes discoNodes = contextProvider.nodes();\n \n pingRequest.pingResponse = createPingResponse(discoNodes);\n \n- HashSet<DiscoveryNode> nodesToPingSet = new HashSet<>();\n- for (PingResponse temporalResponse : temporalResponses) {\n- // Only send pings to nodes that have the same cluster name.\n- if (clusterName.equals(temporalResponse.clusterName())) {\n- nodesToPingSet.add(temporalResponse.node());\n- }\n- }\n- nodesToPingSet.addAll(hostsProvider.buildDynamicNodes());\n+ Set<DiscoveryNode> nodesFromResponses = temporalResponses.stream().map(pingResponse -> {\n+ assert clusterName.equals(pingResponse.clusterName()) :\n+ \"got a ping request from a different cluster. 
expected \" + clusterName + \" got \" + pingResponse.clusterName();\n+ return pingResponse.node();\n+ }).collect(Collectors.toSet());\n \n- // add all possible master nodes that were active in the last known cluster configuration\n- for (ObjectCursor<DiscoveryNode> masterNode : discoNodes.getMasterNodes().values()) {\n- nodesToPingSet.add(masterNode.value);\n- }\n+ // dedup by address\n+ final Map<TransportAddress, DiscoveryNode> uniqueNodesByAddress =\n+ Stream.concat(pingingRound.getSeedNodes().stream(), nodesFromResponses.stream())\n+ .collect(Collectors.toMap(DiscoveryNode::getAddress, Function.identity(), (n1, n2) -> n1));\n \n- // sort the nodes by likelihood of being an active master\n- List<DiscoveryNode> sortedNodesToPing = ElectMasterService.sortByMasterLikelihood(nodesToPingSet);\n-\n- // add the configured hosts first\n- final List<DiscoveryNode> nodesToPing = new ArrayList<>(resolvedDiscoveryNodes.size() + sortedNodesToPing.size());\n- nodesToPing.addAll(resolvedDiscoveryNodes);\n- nodesToPing.addAll(sortedNodesToPing);\n-\n- final CountDownLatch latch = new CountDownLatch(nodesToPing.size());\n- for (final DiscoveryNode node : nodesToPing) {\n- // make sure we are connected\n- final boolean nodeFoundByAddress;\n- DiscoveryNode nodeToSend = discoNodes.findByAddress(node.getAddress());\n- if (nodeToSend != null) {\n- nodeFoundByAddress = true;\n- } else {\n- nodeToSend = node;\n- nodeFoundByAddress = false;\n- }\n \n- if (!transportService.nodeConnected(nodeToSend)) {\n- if (sendPingsHandler.isClosed()) {\n- return;\n+ // resolve what we can via the latest cluster state\n+ final Set<DiscoveryNode> nodesToPing = uniqueNodesByAddress.values().stream()\n+ .map(node -> {\n+ DiscoveryNode foundNode = discoNodes.findByAddress(node.getAddress());\n+ if (foundNode == null) {\n+ return node;\n+ } else {\n+ return foundNode;\n }\n- // if we find on the disco nodes a matching node by address, we are going to restore the connection\n- // anyhow down the line if its not connected...\n- // if we can't resolve the node, we don't know and we have to clean up after pinging. We do have\n- // to make sure we don't disconnect a true node which was temporarily removed from the DiscoveryNodes\n- // but will be added again during the pinging. 
We therefore create a new temporary node\n- if (!nodeFoundByAddress) {\n- if (!nodeToSend.getId().startsWith(UNICAST_NODE_PREFIX)) {\n- DiscoveryNode tempNode = new DiscoveryNode(\"\",\n- UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + \"_\" + nodeToSend.getId() + \"#\",\n- UUIDs.randomBase64UUID(), nodeToSend.getHostName(), nodeToSend.getHostAddress(), nodeToSend.getAddress(),\n- nodeToSend.getAttributes(), nodeToSend.getRoles(), nodeToSend.getVersion());\n-\n- logger.trace(\"replacing {} with temp node {}\", nodeToSend, tempNode);\n- nodeToSend = tempNode;\n+ }).collect(Collectors.toSet());\n+\n+ nodesToPing.forEach(node -> sendPingRequestToNode(node, timeout, pingingRound, pingRequest));\n+ }\n+\n+ private void sendPingRequestToNode(final DiscoveryNode node, TimeValue timeout, final PingingRound pingingRound,\n+ final UnicastPingRequest pingRequest) {\n+ submitToExecutor(new AbstractRunnable() {\n+ @Override\n+ protected void doRun() throws Exception {\n+ Connection connection = null;\n+ if (transportService.nodeConnected(node)) {\n+ try {\n+ // concurrency can still cause disconnects\n+ connection = transportService.getConnection(node);\n+ } catch (NodeNotConnectedException e) {\n+ logger.trace(\"[{}] node [{}] just disconnected, will create a temp connection\", pingingRound.id(), node);\n }\n- sendPingsHandler.nodeToDisconnect.add(nodeToSend);\n }\n- // fork the connection to another thread\n- final DiscoveryNode finalNodeToSend = nodeToSend;\n- unicastZenPingExecutorService.execute(new Runnable() {\n- @Override\n- public void run() {\n- if (sendPingsHandler.isClosed()) {\n- return;\n- }\n- boolean success = false;\n- try {\n- // connect to the node, see if we manage to do it, if not, bail\n- if (!nodeFoundByAddress) {\n- logger.trace(\"[{}] connecting (light) to {}\", sendPingsHandler.id(), finalNodeToSend);\n- transportService.connectToNodeAndHandshake(finalNodeToSend, timeout.getMillis());\n- } else {\n- logger.trace(\"[{}] connecting to {}\", sendPingsHandler.id(), finalNodeToSend);\n- transportService.connectToNode(finalNodeToSend);\n- }\n- logger.trace(\"[{}] connected to {}\", sendPingsHandler.id(), node);\n- if (receivedResponses.containsKey(sendPingsHandler.id())) {\n- // we are connected and still in progress, send the ping request\n- sendPingRequestToNode(sendPingsHandler.id(), timeout, pingRequest, latch, node, finalNodeToSend);\n- } else {\n- // connect took too long, just log it and bail\n- latch.countDown();\n- logger.trace(\"[{}] connect to {} was too long outside of ping window, bailing\",\n- sendPingsHandler.id(), node);\n- }\n- success = true;\n- } catch (ConnectTransportException e) {\n- // can't connect to the node - this is a more common path!\n- logger.trace(\n- (Supplier<?>) () -> new ParameterizedMessage(\n- \"[{}] failed to connect to {}\", sendPingsHandler.id(), finalNodeToSend), e);\n- } catch (RemoteTransportException e) {\n- // something went wrong on the other side\n- logger.debug(\n- (Supplier<?>) () -> new ParameterizedMessage(\n- \"[{}] received a remote error as a response to ping {}\", sendPingsHandler.id(), finalNodeToSend), e);\n- } catch (Exception e) {\n- logger.warn(\n- (Supplier<?>) () -> new ParameterizedMessage(\n- \"[{}] failed send ping to {}\", sendPingsHandler.id(), finalNodeToSend), e);\n- } finally {\n- if (!success) {\n- latch.countDown();\n- }\n- }\n- }\n- });\n- } else {\n- sendPingRequestToNode(sendPingsHandler.id(), timeout, pingRequest, latch, node, nodeToSend);\n+\n+ if (connection == null) {\n+ connection = 
pingingRound.getOrConnect(node);\n+ }\n+\n+ logger.trace(\"[{}] sending to {}\", pingingRound.id(), node);\n+ transportService.sendRequest(connection, ACTION_NAME, pingRequest,\n+ TransportRequestOptions.builder().withTimeout((long) (timeout.millis() * 1.25)).build(),\n+ getPingResponseHandler(pingingRound, node));\n }\n- }\n- if (waitTime != null) {\n- try {\n- latch.await(waitTime.millis(), TimeUnit.MILLISECONDS);\n- } catch (InterruptedException e) {\n- // ignore\n+\n+ @Override\n+ public void onFailure(Exception e) {\n+ if (e instanceof ConnectTransportException || e instanceof AlreadyClosedException) {\n+ // can't connect to the node - this is more common path!\n+ logger.trace(\n+ (Supplier<?>) () -> new ParameterizedMessage(\n+ \"[{}] failed to ping {}\", pingingRound.id(), node), e);\n+ } else if (e instanceof RemoteTransportException) {\n+ // something went wrong on the other side\n+ logger.debug(\n+ (Supplier<?>) () -> new ParameterizedMessage(\n+ \"[{}] received a remote error as a response to ping {}\", pingingRound.id(), node), e);\n+ } else {\n+ logger.warn(\n+ (Supplier<?>) () -> new ParameterizedMessage(\n+ \"[{}] failed send ping to {}\", pingingRound.id(), node), e);\n+ }\n }\n- }\n+\n+ @Override\n+ public void onRejection(Exception e) {\n+ // The RejectedExecutionException can come from the fact unicastZenPingExecutorService is at its max down in sendPings\n+ // But don't bail here, we can retry later on after the send ping has been scheduled.\n+ logger.debug(\"Ping execution rejected\", e);\n+ }\n+ });\n }\n \n- private void sendPingRequestToNode(final int id, final TimeValue timeout, final UnicastPingRequest pingRequest,\n- final CountDownLatch latch, final DiscoveryNode node, final DiscoveryNode nodeToSend) {\n- logger.trace(\"[{}] sending to {}\", id, nodeToSend);\n- transportService.sendRequest(nodeToSend, ACTION_NAME, pingRequest, TransportRequestOptions.builder()\n- .withTimeout((long) (timeout.millis() * 1.25)).build(), new TransportResponseHandler<UnicastPingResponse>() {\n+ // for testing\n+ protected void submitToExecutor(AbstractRunnable abstractRunnable) {\n+ unicastZenPingExecutorService.execute(abstractRunnable);\n+ }\n+\n+ // for testing\n+ protected TransportResponseHandler<UnicastPingResponse> getPingResponseHandler(final PingingRound pingingRound,\n+ final DiscoveryNode node) {\n+ return new TransportResponseHandler<UnicastPingResponse>() {\n \n @Override\n public UnicastPingResponse newInstance() {\n@@ -538,50 +552,36 @@ public String executor() {\n \n @Override\n public void handleResponse(UnicastPingResponse response) {\n- logger.trace(\"[{}] received response from {}: {}\", id, nodeToSend, Arrays.toString(response.pingResponses));\n- try {\n- DiscoveryNodes discoveryNodes = contextProvider.nodes();\n- for (PingResponse pingResponse : response.pingResponses) {\n- if (pingResponse.node().equals(discoveryNodes.getLocalNode())) {\n- // that's us, ignore\n- continue;\n- }\n- SendPingsHandler sendPingsHandler = receivedResponses.get(response.id);\n- if (sendPingsHandler == null) {\n- if (!closed) {\n- // Only log when we're not closing the node. 
Having no send ping handler is then expected\n- logger.warn(\"received ping response {} with no matching handler id [{}]\", pingResponse, response.id);\n- }\n- } else {\n- sendPingsHandler.pingCollection().addPing(pingResponse);\n- }\n+ logger.trace(\"[{}] received response from {}: {}\", pingingRound.id(), node, Arrays.toString(response.pingResponses));\n+ if (pingingRound.isClosed()) {\n+ if (logger.isTraceEnabled()) {\n+ logger.trace(\"[{}] skipping received response from {}. already closed\", pingingRound.id(), node);\n }\n- } finally {\n- latch.countDown();\n+ } else {\n+ Stream.of(response.pingResponses).forEach(pingingRound::addPingResponseToCollection);\n }\n }\n \n @Override\n public void handleException(TransportException exp) {\n- latch.countDown();\n- if (exp instanceof ConnectTransportException) {\n+ if (exp instanceof ConnectTransportException || exp.getCause() instanceof ConnectTransportException) {\n // ok, not connected...\n- logger.trace((Supplier<?>) () -> new ParameterizedMessage(\"failed to connect to {}\", nodeToSend), exp);\n- } else {\n+ logger.trace((Supplier<?>) () -> new ParameterizedMessage(\"failed to connect to {}\", node), exp);\n+ } else if (closed == false) {\n logger.warn((Supplier<?>) () -> new ParameterizedMessage(\"failed to send ping to [{}]\", node), exp);\n }\n }\n- });\n+ };\n }\n \n private UnicastPingResponse handlePingRequest(final UnicastPingRequest request) {\n+ assert clusterName.equals(request.pingResponse.clusterName()) :\n+ \"got a ping request from a different cluster. expected \" + clusterName + \" got \" + request.pingResponse.clusterName();\n temporalResponses.add(request.pingResponse);\n- threadPool.schedule(TimeValue.timeValueMillis(request.timeout.millis() * 2), ThreadPool.Names.SAME, new Runnable() {\n- @Override\n- public void run() {\n- temporalResponses.remove(request.pingResponse);\n- }\n- });\n+ // add to any ongoing pinging\n+ activePingingRounds.values().forEach(p -> p.addPingResponseToCollection(request.pingResponse));\n+ threadPool.schedule(TimeValue.timeValueMillis(request.timeout.millis() * 2), ThreadPool.Names.SAME,\n+ () -> temporalResponses.remove(request.pingResponse));\n \n List<PingResponse> pingResponses = CollectionUtils.iterableAsArrayList(temporalResponses);\n pingResponses.add(createPingResponse(contextProvider.nodes()));\n@@ -601,11 +601,11 @@ public void messageReceived(UnicastPingRequest request, TransportChannel channel\n channel.sendResponse(handlePingRequest(request));\n } else {\n throw new IllegalStateException(\n- String.format(\n- Locale.ROOT,\n- \"mismatched cluster names; request: [%s], local: [%s]\",\n- request.pingResponse.clusterName().value(),\n- clusterName.value()));\n+ String.format(\n+ Locale.ROOT,\n+ \"mismatched cluster names; request: [%s], local: [%s]\",\n+ request.pingResponse.clusterName().value(),\n+ clusterName.value()));\n }\n }\n ", "filename": "core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java", "status": "modified" }, { "diff": "@@ -67,11 +67,11 @@\n import java.util.ArrayList;\n import java.util.List;\n import java.util.Set;\n-import java.util.concurrent.CountDownLatch;\n+import java.util.concurrent.CompletableFuture;\n+import java.util.concurrent.ExecutionException;\n import java.util.concurrent.atomic.AtomicBoolean;\n import java.util.concurrent.atomic.AtomicInteger;\n import java.util.concurrent.atomic.AtomicReference;\n-import java.util.function.BiFunction;\n import java.util.function.Consumer;\n import java.util.stream.Collectors;\n \n@@ -1021,24 
+1021,22 @@ public void handleException(TransportException exp) {\n }\n \n private ZenPing.PingCollection pingAndWait(TimeValue timeout) {\n- final ZenPing.PingCollection response = new ZenPing.PingCollection();\n- final CountDownLatch latch = new CountDownLatch(1);\n+ final CompletableFuture<ZenPing.PingCollection> response = new CompletableFuture<>();\n try {\n- zenPing.ping(pings -> {\n- response.addPings(pings);\n- latch.countDown();\n- }, timeout);\n+ zenPing.ping(response::complete, timeout);\n } catch (Exception ex) {\n- logger.warn(\"Ping execution failed\", ex);\n- latch.countDown();\n+ // logged later\n+ response.completeExceptionally(ex);\n }\n \n try {\n- latch.await();\n- return response;\n+ return response.get();\n } catch (InterruptedException e) {\n logger.trace(\"pingAndWait interrupted\");\n- return response;\n+ return new ZenPing.PingCollection();\n+ } catch (ExecutionException e) {\n+ logger.warn(\"Ping execution failed\", e);\n+ return new ZenPing.PingCollection();\n }\n }\n ", "filename": "core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java", "status": "modified" }, { "diff": "@@ -30,29 +30,19 @@\n \n import java.io.IOException;\n import java.util.ArrayList;\n-import java.util.Collection;\n import java.util.HashMap;\n import java.util.List;\n import java.util.Map;\n import java.util.concurrent.atomic.AtomicLong;\n+import java.util.function.Consumer;\n \n import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK;\n \n public interface ZenPing extends Releasable {\n \n void start(PingContextProvider contextProvider);\n \n- void ping(PingListener listener, TimeValue timeout);\n-\n- interface PingListener {\n-\n- /**\n- * called when pinging is done.\n- *\n- * @param pings ping result *must\n- */\n- void onPing(Collection<PingResponse> pings);\n- }\n+ void ping(Consumer<PingCollection> resultsConsumer, TimeValue timeout);\n \n class PingResponse implements Streamable {\n \n@@ -191,13 +181,6 @@ public synchronized boolean addPing(PingResponse ping) {\n return false;\n }\n \n- /** adds multiple pings if newer than previous pings from the same node */\n- public synchronized void addPings(Iterable<PingResponse> pings) {\n- for (PingResponse ping : pings) {\n- addPing(ping);\n- }\n- }\n-\n /** serialize current pings to a list. It is guaranteed that the list contains one ping response per node */\n public synchronized List<PingResponse> toList() {\n return new ArrayList<>(pings.values());", "filename": "core/src/main/java/org/elasticsearch/discovery/zen/ZenPing.java", "status": "modified" }, { "diff": "@@ -18,6 +18,7 @@\n */\n package org.elasticsearch.transport;\n \n+import org.elasticsearch.common.inject.internal.Nullable;\n import org.elasticsearch.common.unit.TimeValue;\n \n import java.util.ArrayList;\n@@ -35,16 +36,25 @@\n public final class ConnectionProfile {\n \n /**\n- * A pre-built light connection profile that shares a single connection across all\n- * types.\n+ * Builds a connection profile that is dedicated to a single channel type. 
Use this\n+ * when opening single use connections\n */\n- public static final ConnectionProfile LIGHT_PROFILE = new ConnectionProfile(\n- Collections.singletonList(new ConnectionTypeHandle(0, 1, EnumSet.of(\n- TransportRequestOptions.Type.BULK,\n- TransportRequestOptions.Type.PING,\n- TransportRequestOptions.Type.RECOVERY,\n- TransportRequestOptions.Type.REG,\n- TransportRequestOptions.Type.STATE))), 1, null, null);\n+ public static ConnectionProfile buildSingleChannelProfile(TransportRequestOptions.Type channelType,\n+ @Nullable TimeValue connectTimeout,\n+ @Nullable TimeValue handshakeTimeout) {\n+ Builder builder = new Builder();\n+ builder.addConnections(1, channelType);\n+ final EnumSet<TransportRequestOptions.Type> otherTypes = EnumSet.allOf(TransportRequestOptions.Type.class);\n+ otherTypes.remove(channelType);\n+ builder.addConnections(0, otherTypes.stream().toArray(TransportRequestOptions.Type[]::new));\n+ if (connectTimeout != null) {\n+ builder.setConnectTimeout(connectTimeout);\n+ }\n+ if (handshakeTimeout != null) {\n+ builder.setHandshakeTimeout(handshakeTimeout);\n+ }\n+ return builder.build();\n+ }\n \n private final List<ConnectionTypeHandle> handles;\n private final int numConnections;", "filename": "core/src/main/java/org/elasticsearch/transport/ConnectionProfile.java", "status": "modified" }, { "diff": "@@ -63,8 +63,7 @@ public interface Transport extends LifecycleComponent {\n boolean nodeConnected(DiscoveryNode node);\n \n /**\n- * Connects to a node with the given connection profile. Use {@link ConnectionProfile#LIGHT_PROFILE} when just connecting for ping\n- * and then disconnecting. If the node is already connected this method has no effect\n+ * Connects to a node with the given connection profile. If the node is already connected this method has no effect\n */\n void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile) throws ConnectTransportException;\n ", "filename": "core/src/main/java/org/elasticsearch/transport/Transport.java", "status": "modified" }, { "diff": "@@ -62,7 +62,6 @@\n import java.util.concurrent.CopyOnWriteArrayList;\n import java.util.concurrent.CountDownLatch;\n import java.util.concurrent.ScheduledFuture;\n-import java.util.concurrent.atomic.AtomicLong;\n import java.util.function.Function;\n import java.util.function.Supplier;\n \n@@ -328,32 +327,6 @@ public Transport.Connection openConnection(final DiscoveryNode node, ConnectionP\n }\n }\n \n- /**\n- * Lightly connect to the specified node, returning updated node\n- * information. 
The handshake will fail if the cluster name on the\n- * target node mismatches the local cluster name and\n- * {@code checkClusterName} is {@code true}.\n- *\n- * @param node the node to connect to\n- * @param handshakeTimeout handshake timeout\n- * @return the connected node\n- * @throws ConnectTransportException if the connection failed\n- * @throws IllegalStateException if the handshake failed\n- */\n- public DiscoveryNode connectToNodeAndHandshake(\n- final DiscoveryNode node,\n- final long handshakeTimeout) throws IOException {\n- if (node.equals(localNode)) {\n- return localNode;\n- }\n- DiscoveryNode handshakeNode;\n- try (Transport.Connection connection = transport.openConnection(node, ConnectionProfile.LIGHT_PROFILE)) {\n- handshakeNode = handshake(connection, handshakeTimeout);\n- }\n- connectToNode(node, ConnectionProfile.LIGHT_PROFILE);\n- return handshakeNode;\n- }\n-\n /**\n * Executes a high-level handshake using the given connection\n * and returns the discovery node of the node the connection", "filename": "core/src/main/java/org/elasticsearch/transport/TransportService.java", "status": "modified" }, { "diff": "@@ -26,6 +26,7 @@\n import org.elasticsearch.cluster.ClusterState;\n import org.elasticsearch.cluster.block.ClusterBlocks;\n import org.elasticsearch.cluster.node.DiscoveryNode;\n+import org.elasticsearch.cluster.node.DiscoveryNode.Role;\n import org.elasticsearch.cluster.node.DiscoveryNodes;\n import org.elasticsearch.common.io.stream.NamedWriteableRegistry;\n import org.elasticsearch.common.network.NetworkAddress;\n@@ -34,18 +35,22 @@\n import org.elasticsearch.common.transport.TransportAddress;\n import org.elasticsearch.common.unit.TimeValue;\n import org.elasticsearch.common.util.BigArrays;\n+import org.elasticsearch.common.util.concurrent.AbstractRunnable;\n import org.elasticsearch.common.util.concurrent.ConcurrentCollections;\n import org.elasticsearch.common.util.concurrent.EsExecutors;\n import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.test.VersionUtils;\n-import org.elasticsearch.test.junit.annotations.TestLogging;\n import org.elasticsearch.test.transport.MockTransportService;\n import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n+import org.elasticsearch.transport.ConnectionProfile;\n import org.elasticsearch.transport.MockTcpTransport;\n import org.elasticsearch.transport.Transport;\n import org.elasticsearch.transport.TransportConnectionListener;\n+import org.elasticsearch.transport.TransportException;\n+import org.elasticsearch.transport.TransportRequestOptions;\n+import org.elasticsearch.transport.TransportResponseHandler;\n import org.elasticsearch.transport.TransportService;\n import org.elasticsearch.transport.TransportSettings;\n import org.junit.After;\n@@ -60,12 +65,14 @@\n import java.util.Arrays;\n import java.util.Collection;\n import java.util.Collections;\n+import java.util.EnumSet;\n import java.util.HashMap;\n import java.util.HashSet;\n import java.util.List;\n import java.util.Map;\n import java.util.Set;\n import java.util.Stack;\n+import java.util.concurrent.CompletableFuture;\n import java.util.concurrent.ConcurrentMap;\n import java.util.concurrent.CountDownLatch;\n import java.util.concurrent.ExecutionException;\n@@ -82,7 +89,6 @@\n import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK;\n import static org.hamcrest.Matchers.empty;\n import static 
org.hamcrest.Matchers.equalTo;\n-import static org.hamcrest.Matchers.greaterThan;\n import static org.hamcrest.Matchers.hasSize;\n import static org.mockito.Matchers.eq;\n import static org.mockito.Mockito.mock;\n@@ -124,8 +130,7 @@ public void tearDown() throws Exception {\n \n private static final UnicastHostsProvider EMPTY_HOSTS_PROVIDER = Collections::emptyList;\n \n- @TestLogging(\"org.elasticsearch.transport:TRACE,org.elasticsearch.discovery.zen.UnicastZenPing:TRACE\")\n- public void testSimplePings() throws IOException, InterruptedException {\n+ public void testSimplePings() throws IOException, InterruptedException, ExecutionException {\n // use ephemeral ports\n final Settings settings = Settings.builder().put(\"cluster.name\", \"test\").put(TransportSettings.PORT.getKey(), 0).build();\n final Settings settingsMismatch =\n@@ -140,33 +145,43 @@ public void testSimplePings() throws IOException, InterruptedException {\n new NoneCircuitBreakerService(),\n new NamedWriteableRegistry(Collections.emptyList()),\n networkService,\n- v);\n+ v) {\n+ @Override\n+ public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile) {\n+ throw new AssertionError(\"zen pings should never connect to node (got [\" + node + \"])\");\n+ }\n+ };\n \n NetworkHandle handleA = startServices(settings, threadPool, \"UZP_A\", Version.CURRENT, supplier);\n closeables.push(handleA.transportService);\n NetworkHandle handleB = startServices(settings, threadPool, \"UZP_B\", Version.CURRENT, supplier);\n closeables.push(handleB.transportService);\n NetworkHandle handleC = startServices(settingsMismatch, threadPool, \"UZP_C\", Version.CURRENT, supplier);\n closeables.push(handleC.transportService);\n- // just fake that no versions are compatible with this node\n- Version previousVersion = VersionUtils.getPreviousVersion(Version.CURRENT.minimumCompatibilityVersion());\n- Version versionD = VersionUtils.randomVersionBetween(random(), previousVersion.minimumCompatibilityVersion(), previousVersion);\n+ final Version versionD;\n+ if (randomBoolean()) {\n+ versionD = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT);\n+ } else {\n+ versionD = Version.CURRENT;\n+ }\n+ logger.info(\"UZP_D version set to [{}]\", versionD);\n NetworkHandle handleD = startServices(settingsMismatch, threadPool, \"UZP_D\", versionD, supplier);\n closeables.push(handleD.transportService);\n \n final ClusterState state = ClusterState.builder(new ClusterName(\"test\")).version(randomPositiveLong()).build();\n+ final ClusterState stateMismatch = ClusterState.builder(new ClusterName(\"mismatch\")).version(randomPositiveLong()).build();\n \n Settings hostsSettings = Settings.builder()\n- .putArray(\"discovery.zen.ping.unicast.hosts\",\n+ .putArray(\"discovery.zen.ping.unicast.hosts\",\n NetworkAddress.format(new InetSocketAddress(handleA.address.address().getAddress(), handleA.address.address().getPort())),\n NetworkAddress.format(new InetSocketAddress(handleB.address.address().getAddress(), handleB.address.address().getPort())),\n NetworkAddress.format(new InetSocketAddress(handleC.address.address().getAddress(), handleC.address.address().getPort())),\n NetworkAddress.format(new InetSocketAddress(handleD.address.address().getAddress(), handleD.address.address().getPort())))\n- .put(\"cluster.name\", \"test\")\n- .build();\n+ .put(\"cluster.name\", \"test\")\n+ .build();\n \n Settings hostsSettingsMismatch = Settings.builder().put(hostsSettings).put(settingsMismatch).build();\n- 
UnicastZenPing zenPingA = new UnicastZenPing(hostsSettings, threadPool, handleA.transportService, EMPTY_HOSTS_PROVIDER);\n+ TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER);\n zenPingA.start(new PingContextProvider() {\n @Override\n public DiscoveryNodes nodes() {\n@@ -180,7 +195,7 @@ public ClusterState clusterState() {\n });\n closeables.push(zenPingA);\n \n- UnicastZenPing zenPingB = new UnicastZenPing(hostsSettings, threadPool, handleB.transportService, EMPTY_HOSTS_PROVIDER);\n+ TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER);\n zenPingB.start(new PingContextProvider() {\n @Override\n public DiscoveryNodes nodes() {\n@@ -194,7 +209,8 @@ public ClusterState clusterState() {\n });\n closeables.push(zenPingB);\n \n- UnicastZenPing zenPingC = new UnicastZenPing(hostsSettingsMismatch, threadPool, handleC.transportService, EMPTY_HOSTS_PROVIDER) {\n+ TestUnicastZenPing zenPingC = new TestUnicastZenPing(hostsSettingsMismatch, threadPool, handleC,\n+ EMPTY_HOSTS_PROVIDER) {\n @Override\n protected Version getVersion() {\n return versionD;\n@@ -208,12 +224,13 @@ public DiscoveryNodes nodes() {\n \n @Override\n public ClusterState clusterState() {\n- return state;\n+ return stateMismatch;\n }\n });\n closeables.push(zenPingC);\n \n- UnicastZenPing zenPingD = new UnicastZenPing(hostsSettingsMismatch, threadPool, handleD.transportService, EMPTY_HOSTS_PROVIDER);\n+ TestUnicastZenPing zenPingD = new TestUnicastZenPing(hostsSettingsMismatch, threadPool, handleD,\n+ EMPTY_HOSTS_PROVIDER);\n zenPingD.start(new PingContextProvider() {\n @Override\n public DiscoveryNodes nodes() {\n@@ -222,40 +239,48 @@ public DiscoveryNodes nodes() {\n \n @Override\n public ClusterState clusterState() {\n- return state;\n+ return stateMismatch;\n }\n });\n closeables.push(zenPingD);\n \n logger.info(\"ping from UZP_A\");\n- Collection<ZenPing.PingResponse> pingResponses = zenPingA.pingAndWait(TimeValue.timeValueMillis(500));\n+ Collection<ZenPing.PingResponse> pingResponses = zenPingA.pingAndWait().toList();\n assertThat(pingResponses.size(), equalTo(1));\n ZenPing.PingResponse ping = pingResponses.iterator().next();\n assertThat(ping.node().getId(), equalTo(\"UZP_B\"));\n assertThat(ping.getClusterStateVersion(), equalTo(state.version()));\n- assertCountersMoreThan(handleA, handleB, handleC, handleD);\n+ assertPingCount(handleA, handleB, 3);\n+ assertPingCount(handleA, handleC, 0); // mismatch, shouldn't ping\n+ assertPingCount(handleA, handleD, 0); // mismatch, shouldn't ping\n \n // ping again, this time from B,\n logger.info(\"ping from UZP_B\");\n- pingResponses = zenPingB.pingAndWait(TimeValue.timeValueMillis(500));\n+ pingResponses = zenPingB.pingAndWait().toList();\n assertThat(pingResponses.size(), equalTo(1));\n ping = pingResponses.iterator().next();\n assertThat(ping.node().getId(), equalTo(\"UZP_A\"));\n assertThat(ping.getClusterStateVersion(), equalTo(ElectMasterService.MasterCandidate.UNRECOVERED_CLUSTER_VERSION));\n- assertCountersMoreThan(handleB, handleA, handleC, handleD);\n+ assertPingCount(handleB, handleA, 3);\n+ assertPingCount(handleB, handleC, 0); // mismatch, shouldn't ping\n+ assertPingCount(handleB, handleD, 0); // mismatch, shouldn't ping\n \n logger.info(\"ping from UZP_C\");\n- pingResponses = zenPingC.pingAndWait(TimeValue.timeValueMillis(500));\n- assertThat(pingResponses.size(), equalTo(0));\n- assertCountersMoreThan(handleC, handleA, handleB, handleD);\n+ 
pingResponses = zenPingC.pingAndWait().toList();\n+ assertThat(pingResponses.size(), equalTo(1));\n+ assertPingCount(handleC, handleA, 0);\n+ assertPingCount(handleC, handleB, 0);\n+ assertPingCount(handleC, handleD, 3);\n \n logger.info(\"ping from UZP_D\");\n- pingResponses = zenPingD.pingAndWait(TimeValue.timeValueMillis(500));\n- assertThat(pingResponses.size(), equalTo(0));\n- assertCountersMoreThan(handleD, handleA, handleB, handleC);\n+ pingResponses = zenPingD.pingAndWait().toList();\n+ assertThat(pingResponses.size(), equalTo(1));\n+ assertPingCount(handleD, handleA, 0);\n+ assertPingCount(handleD, handleB, 0);\n+ assertPingCount(handleD, handleC, 3);\n }\n \n- public void testUnknownHostNotCached() {\n+ public void testUnknownHostNotCached() throws ExecutionException, InterruptedException {\n // use ephemeral ports\n final Settings settings = Settings.builder().put(\"cluster.name\", \"test\").put(TransportSettings.PORT.getKey(), 0).build();\n \n@@ -306,7 +331,7 @@ public TransportAddress[] addressesFromString(String address, int perAddressLimi\n \n final ClusterState state = ClusterState.builder(new ClusterName(\"test\")).version(randomPositiveLong()).build();\n \n- final UnicastZenPing zenPingA = new UnicastZenPing(hostsSettings, threadPool, handleA.transportService, EMPTY_HOSTS_PROVIDER);\n+ final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER);\n zenPingA.start(new PingContextProvider() {\n @Override\n public DiscoveryNodes nodes() {\n@@ -320,7 +345,7 @@ public ClusterState clusterState() {\n });\n closeables.push(zenPingA);\n \n- UnicastZenPing zenPingB = new UnicastZenPing(hostsSettings, threadPool, handleB.transportService, EMPTY_HOSTS_PROVIDER);\n+ TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER);\n zenPingB.start(new PingContextProvider() {\n @Override\n public DiscoveryNodes nodes() {\n@@ -334,7 +359,7 @@ public ClusterState clusterState() {\n });\n closeables.push(zenPingB);\n \n- UnicastZenPing zenPingC = new UnicastZenPing(hostsSettings, threadPool, handleC.transportService, EMPTY_HOSTS_PROVIDER);\n+ TestUnicastZenPing zenPingC = new TestUnicastZenPing(hostsSettings, threadPool, handleC, EMPTY_HOSTS_PROVIDER);\n zenPingC.start(new PingContextProvider() {\n @Override\n public DiscoveryNodes nodes() {\n@@ -350,12 +375,13 @@ public ClusterState clusterState() {\n \n // the presence of an unresolvable host should not prevent resolvable hosts from being pinged\n {\n- final Collection<ZenPing.PingResponse> pingResponses = zenPingA.pingAndWait(TimeValue.timeValueMillis(500));\n+ final Collection<ZenPing.PingResponse> pingResponses = zenPingA.pingAndWait().toList();\n assertThat(pingResponses.size(), equalTo(1));\n ZenPing.PingResponse ping = pingResponses.iterator().next();\n assertThat(ping.node().getId(), equalTo(\"UZP_C\"));\n assertThat(ping.getClusterStateVersion(), equalTo(state.version()));\n- assertCountersMoreThan(handleA, handleC);\n+ assertPingCount(handleA, handleB, 0);\n+ assertPingCount(handleA, handleC, 3);\n assertNull(handleA.counters.get(handleB.address));\n }\n \n@@ -373,11 +399,13 @@ public ClusterState clusterState() {\n \n // now we should see pings to UZP_B; this establishes that host resolutions are not cached\n {\n- final Collection<ZenPing.PingResponse> secondPingResponses = zenPingA.pingAndWait(TimeValue.timeValueMillis(500));\n+ handleA.counters.clear();\n+ final Collection<ZenPing.PingResponse> secondPingResponses = 
zenPingA.pingAndWait().toList();\n assertThat(secondPingResponses.size(), equalTo(2));\n final Set<String> ids = new HashSet<>(secondPingResponses.stream().map(p -> p.node().getId()).collect(Collectors.toList()));\n assertThat(ids, equalTo(new HashSet<>(Arrays.asList(\"UZP_B\", \"UZP_C\"))));\n- assertCountersMoreThan(moreThan, handleA, handleB, handleC);\n+ assertPingCount(handleA, handleB, 3);\n+ assertPingCount(handleA, handleC, 3);\n }\n }\n \n@@ -395,15 +423,14 @@ public void testPortLimit() throws InterruptedException {\n final TransportService transportService =\n new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);\n closeables.push(transportService);\n- final AtomicInteger idGenerator = new AtomicInteger();\n final int limitPortCounts = randomIntBetween(1, 10);\n- final List<DiscoveryNode> discoveryNodes = UnicastZenPing.resolveDiscoveryNodes(\n+ final List<DiscoveryNode> discoveryNodes = TestUnicastZenPing.resolveHostsLists(\n executorService,\n logger,\n Collections.singletonList(\"127.0.0.1\"),\n limitPortCounts,\n transportService,\n- () -> Integer.toString(idGenerator.incrementAndGet()),\n+ \"test_\",\n TimeValue.timeValueSeconds(1));\n assertThat(discoveryNodes, hasSize(limitPortCounts));\n final Set<Integer> ports = new HashSet<>();\n@@ -439,15 +466,14 @@ public TransportAddress[] addressesFromString(String address, int perAddressLimi\n final TransportService transportService =\n new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);\n closeables.push(transportService);\n- final AtomicInteger idGenerator = new AtomicInteger();\n \n- final List<DiscoveryNode> discoveryNodes = UnicastZenPing.resolveDiscoveryNodes(\n+ final List<DiscoveryNode> discoveryNodes = TestUnicastZenPing.resolveHostsLists(\n executorService,\n logger,\n Arrays.asList(hostname),\n 1,\n transportService,\n- () -> Integer.toString(idGenerator.incrementAndGet()),\n+ \"test_\",\n TimeValue.timeValueSeconds(1)\n );\n \n@@ -490,16 +516,15 @@ public TransportAddress[] addressesFromString(String address, int perAddressLimi\n final TransportService transportService =\n new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);\n closeables.push(transportService);\n- final AtomicInteger idGenerator = new AtomicInteger();\n final TimeValue resolveTimeout = TimeValue.timeValueSeconds(randomIntBetween(1, 3));\n try {\n- final List<DiscoveryNode> discoveryNodes = UnicastZenPing.resolveDiscoveryNodes(\n+ final List<DiscoveryNode> discoveryNodes = TestUnicastZenPing.resolveHostsLists(\n executorService,\n logger,\n Arrays.asList(\"hostname1\", \"hostname2\"),\n 1,\n transportService,\n- () -> Integer.toString(idGenerator.incrementAndGet()),\n+ \"test+\",\n resolveTimeout);\n \n assertThat(discoveryNodes, hasSize(1));\n@@ -513,6 +538,156 @@ public TransportAddress[] addressesFromString(String address, int perAddressLimi\n }\n }\n \n+ public void testResolveReuseExistingNodeConnections() throws ExecutionException, InterruptedException {\n+ final Settings settings = Settings.builder().put(\"cluster.name\", \"test\").put(TransportSettings.PORT.getKey(), 0).build();\n+\n+ NetworkService networkService = new NetworkService(settings, Collections.emptyList());\n+\n+ final BiFunction<Settings, Version, Transport> supplier = (s, v) -> new MockTcpTransport(\n+ s,\n+ threadPool,\n+ BigArrays.NON_RECYCLING_INSTANCE,\n+ new NoneCircuitBreakerService(),\n+ new 
NamedWriteableRegistry(Collections.emptyList()),\n+ networkService,\n+ v);\n+\n+ NetworkHandle handleA = startServices(settings, threadPool, \"UZP_A\", Version.CURRENT, supplier, EnumSet.allOf(Role.class));\n+ closeables.push(handleA.transportService);\n+ NetworkHandle handleB = startServices(settings, threadPool, \"UZP_B\", Version.CURRENT, supplier, EnumSet.allOf(Role.class));\n+ closeables.push(handleB.transportService);\n+\n+ final boolean useHosts = randomBoolean();\n+ final Settings.Builder hostsSettingsBuilder = Settings.builder().put(\"cluster.name\", \"test\");\n+ if (useHosts) {\n+ hostsSettingsBuilder.putArray(\"discovery.zen.ping.unicast.hosts\",\n+ NetworkAddress.format(new InetSocketAddress(handleB.address.address().getAddress(), handleB.address.address().getPort()))\n+ );\n+ } else {\n+ hostsSettingsBuilder.put(\"discovery.zen.ping.unicast.hosts\", (String) null);\n+ }\n+ final Settings hostsSettings = hostsSettingsBuilder.build();\n+ final ClusterState state = ClusterState.builder(new ClusterName(\"test\")).version(randomPositiveLong()).build();\n+\n+ // connection to reuse\n+ handleA.transportService.connectToNode(handleB.node);\n+\n+ // install a listener to check that no new connections are made\n+ handleA.transportService.addConnectionListener(new TransportConnectionListener() {\n+ @Override\n+ public void onConnectionOpened(DiscoveryNode node) {\n+ fail(\"should not open any connections. got [\" + node + \"]\");\n+ }\n+ });\n+\n+ final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER);\n+ zenPingA.start(new PingContextProvider() {\n+ @Override\n+ public DiscoveryNodes nodes() {\n+ return DiscoveryNodes.builder().add(handleA.node).add(handleB.node).localNodeId(\"UZP_A\").build();\n+ }\n+\n+ @Override\n+ public ClusterState clusterState() {\n+ return ClusterState.builder(state).blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)).build();\n+ }\n+ });\n+ closeables.push(zenPingA);\n+\n+ TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER);\n+ zenPingB.start(new PingContextProvider() {\n+ @Override\n+ public DiscoveryNodes nodes() {\n+ return DiscoveryNodes.builder().add(handleB.node).localNodeId(\"UZP_B\").build();\n+ }\n+\n+ @Override\n+ public ClusterState clusterState() {\n+ return state;\n+ }\n+ });\n+ closeables.push(zenPingB);\n+\n+ Collection<ZenPing.PingResponse> pingResponses = zenPingA.pingAndWait().toList();\n+ assertThat(pingResponses.size(), equalTo(1));\n+ ZenPing.PingResponse ping = pingResponses.iterator().next();\n+ assertThat(ping.node().getId(), equalTo(\"UZP_B\"));\n+ assertThat(ping.getClusterStateVersion(), equalTo(state.version()));\n+\n+ }\n+\n+ public void testPingingTemporalPings() throws ExecutionException, InterruptedException {\n+ final Settings settings = Settings.builder().put(\"cluster.name\", \"test\").put(TransportSettings.PORT.getKey(), 0).build();\n+\n+ NetworkService networkService = new NetworkService(settings, Collections.emptyList());\n+\n+ final BiFunction<Settings, Version, Transport> supplier = (s, v) -> new MockTcpTransport(\n+ s,\n+ threadPool,\n+ BigArrays.NON_RECYCLING_INSTANCE,\n+ new NoneCircuitBreakerService(),\n+ new NamedWriteableRegistry(Collections.emptyList()),\n+ networkService,\n+ v);\n+\n+ NetworkHandle handleA = startServices(settings, threadPool, \"UZP_A\", Version.CURRENT, supplier, EnumSet.allOf(Role.class));\n+ closeables.push(handleA.transportService);\n+ NetworkHandle 
handleB = startServices(settings, threadPool, \"UZP_B\", Version.CURRENT, supplier, EnumSet.allOf(Role.class));\n+ closeables.push(handleB.transportService);\n+\n+ final Settings hostsSettings = Settings.builder()\n+ .put(\"cluster.name\", \"test\")\n+ .put(\"discovery.zen.ping.unicast.hosts\", (String) null) // use nodes for simplicity\n+ .build();\n+ final ClusterState state = ClusterState.builder(new ClusterName(\"test\")).version(randomPositiveLong()).build();\n+\n+ final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER);\n+ zenPingA.start(new PingContextProvider() {\n+ @Override\n+ public DiscoveryNodes nodes() {\n+ return DiscoveryNodes.builder().add(handleA.node).add(handleB.node).localNodeId(\"UZP_A\").build();\n+ }\n+\n+ @Override\n+ public ClusterState clusterState() {\n+ return ClusterState.builder(state).blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)).build();\n+ }\n+ });\n+ closeables.push(zenPingA);\n+\n+ // Node B doesn't know about A!\n+ TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER);\n+ zenPingB.start(new PingContextProvider() {\n+ @Override\n+ public DiscoveryNodes nodes() {\n+ return DiscoveryNodes.builder().add(handleB.node).localNodeId(\"UZP_B\").build();\n+ }\n+\n+ @Override\n+ public ClusterState clusterState() {\n+ return state;\n+ }\n+ });\n+ closeables.push(zenPingB);\n+\n+ {\n+ logger.info(\"pinging from UZP_A so UZP_B will learn about it\");\n+ Collection<ZenPing.PingResponse> pingResponses = zenPingA.pingAndWait().toList();\n+ assertThat(pingResponses.size(), equalTo(1));\n+ ZenPing.PingResponse ping = pingResponses.iterator().next();\n+ assertThat(ping.node().getId(), equalTo(\"UZP_B\"));\n+ assertThat(ping.getClusterStateVersion(), equalTo(state.version()));\n+ }\n+ {\n+ logger.info(\"pinging from UZP_B\");\n+ Collection<ZenPing.PingResponse> pingResponses = zenPingB.pingAndWait().toList();\n+ assertThat(pingResponses.size(), equalTo(1));\n+ ZenPing.PingResponse ping = pingResponses.iterator().next();\n+ assertThat(ping.node().getId(), equalTo(\"UZP_A\"));\n+ assertThat(ping.getClusterStateVersion(), equalTo(-1L)); // A has a block\n+ }\n+ }\n+\n public void testInvalidHosts() throws InterruptedException {\n final Logger logger = mock(Logger.class);\n final NetworkService networkService = new NetworkService(Settings.EMPTY, Collections.emptyList());\n@@ -529,72 +704,65 @@ public void testInvalidHosts() throws InterruptedException {\n final TransportService transportService =\n new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);\n closeables.push(transportService);\n- final AtomicInteger idGenerator = new AtomicInteger();\n- final List<DiscoveryNode> discoveryNodes = UnicastZenPing.resolveDiscoveryNodes(\n+ final List<DiscoveryNode> discoveryNodes = TestUnicastZenPing.resolveHostsLists(\n executorService,\n logger,\n Arrays.asList(\"127.0.0.1:9300:9300\", \"127.0.0.1:9301\"),\n 1,\n transportService,\n- () -> Integer.toString(idGenerator.incrementAndGet()),\n+ \"test_\",\n TimeValue.timeValueSeconds(1));\n assertThat(discoveryNodes, hasSize(1)); // only one of the two is valid and will be used\n assertThat(discoveryNodes.get(0).getAddress().getAddress(), equalTo(\"127.0.0.1\"));\n assertThat(discoveryNodes.get(0).getAddress().getPort(), equalTo(9301));\n verify(logger).warn(eq(\"failed to resolve host [127.0.0.1:9300:9300]\"), 
Matchers.any(ExecutionException.class));\n }\n \n- // assert that we tried to ping each of the configured nodes at least once\n- private void assertCountersMoreThan(final NetworkHandle that, final NetworkHandle...handles) {\n- final HashMap<TransportAddress, Integer> moreThan = new HashMap<>();\n- for (final NetworkHandle handle : handles) {\n- assert handle != that;\n- moreThan.put(handle.address, 0);\n- }\n- assertCountersMoreThan(moreThan, that, handles);\n+ private void assertPingCount(final NetworkHandle fromNode, final NetworkHandle toNode, int expectedCount) {\n+ final AtomicInteger counter = fromNode.counters.getOrDefault(toNode.address, new AtomicInteger());\n+ final String onNodeName = fromNode.node.getName();\n+ assertNotNull(\"handle for [\" + onNodeName + \"] has no 'expected' counter\", counter);\n+ final String forNodeName = toNode.node.getName();\n+ assertThat(\"node [\" + onNodeName + \"] ping count to [\" + forNodeName + \"] is unexpected\",\n+ counter.get(), equalTo(expectedCount));\n }\n \n- private void assertCountersMoreThan(\n- final Map<TransportAddress, Integer> moreThan,\n- final NetworkHandle that,\n- final NetworkHandle... handles) {\n- for (final NetworkHandle handle : handles) {\n- assert handle != that;\n- assertThat(that.counters.get(handle.address).get(), greaterThan(moreThan.get(handle.address)));\n- }\n+ private NetworkHandle startServices(\n+ final Settings settings,\n+ final ThreadPool threadPool,\n+ final String nodeId,\n+ final Version version,\n+ final BiFunction<Settings, Version, Transport> supplier) {\n+ return startServices(settings, threadPool, nodeId, version, supplier, emptySet());\n+\n }\n \n private NetworkHandle startServices(\n final Settings settings,\n final ThreadPool threadPool,\n final String nodeId,\n final Version version,\n- final BiFunction<Settings, Version, Transport> supplier) {\n- final Transport transport = supplier.apply(settings, version);\n- final TransportService transportService =\n- new MockTransportService(settings, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);\n+ final BiFunction<Settings, Version, Transport> supplier,\n+ final Set<Role> nodeRoles) {\n+ final Settings nodeSettings = Settings.builder().put(settings)\n+ .put(\"node.name\", nodeId)\n+ .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), \"internal:discovery/zen/unicast\")\n+ .build();\n+ final Transport transport = supplier.apply(nodeSettings, version);\n+ final MockTransportService transportService =\n+ new MockTransportService(nodeSettings, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);\n transportService.start();\n transportService.acceptIncomingRequests();\n final ConcurrentMap<TransportAddress, AtomicInteger> counters = ConcurrentCollections.newConcurrentMap();\n- transportService.addConnectionListener(new TransportConnectionListener() {\n-\n- @Override\n- public void onNodeConnected(DiscoveryNode node) {\n- }\n-\n+ transportService.addTracer(new MockTransportService.Tracer() {\n @Override\n- public void onConnectionOpened(DiscoveryNode node) {\n+ public void requestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) {\n counters.computeIfAbsent(node.getAddress(), k -> new AtomicInteger());\n counters.get(node.getAddress()).incrementAndGet();\n }\n-\n- @Override\n- public void onNodeDisconnected(DiscoveryNode node) {\n- }\n-\n });\n final DiscoveryNode node =\n- new DiscoveryNode(nodeId, transportService.boundAddress().publishAddress(), emptyMap(), 
emptySet(), version);\n+ new DiscoveryNode(nodeId, nodeId, transportService.boundAddress().publishAddress(), emptyMap(), nodeRoles, version);\n transportService.setLocalNode(node);\n return new NetworkHandle(transport.boundAddress().publishAddress(), transportService, node, counters);\n }\n@@ -616,7 +784,123 @@ public NetworkHandle(\n this.node = discoveryNode;\n this.counters = counters;\n }\n+ }\n+\n+ private static class TestUnicastZenPing extends UnicastZenPing {\n+\n+ public TestUnicastZenPing(Settings settings, ThreadPool threadPool, NetworkHandle networkHandle,\n+ UnicastHostsProvider unicastHostsProvider) {\n+ super(Settings.builder().put(\"node.name\", networkHandle.node.getName()).put(settings).build(),\n+ threadPool, networkHandle.transportService, unicastHostsProvider);\n+ }\n+\n+ volatile CountDownLatch allTasksCompleted;\n+ volatile AtomicInteger pendingTasks;\n+\n+ PingCollection pingAndWait() throws ExecutionException, InterruptedException {\n+ allTasksCompleted = new CountDownLatch(1);\n+ pendingTasks = new AtomicInteger();\n+ // make the three sending rounds to come as started\n+ markTaskAsStarted(\"send pings\");\n+ markTaskAsStarted(\"send pings\");\n+ markTaskAsStarted(\"send pings\");\n+ final CompletableFuture<PingCollection> response = new CompletableFuture<>();\n+ try {\n+ ping(response::complete, TimeValue.timeValueMillis(1), TimeValue.timeValueSeconds(1));\n+ } catch (Exception ex) {\n+ response.completeExceptionally(ex);\n+ }\n+ return response.get();\n+ }\n+\n+ @Override\n+ protected void finishPingingRound(PingingRound pingingRound) {\n+ // wait for all activity to finish before closing\n+ try {\n+ allTasksCompleted.await();\n+ } catch (InterruptedException e) {\n+ // ok, finish anyway\n+ }\n+ super.finishPingingRound(pingingRound);\n+ }\n+\n+ @Override\n+ protected void sendPings(TimeValue timeout, PingingRound pingingRound) {\n+ super.sendPings(timeout, pingingRound);\n+ markTaskAsCompleted(\"send pings\");\n+ }\n+\n+ @Override\n+ protected void submitToExecutor(AbstractRunnable abstractRunnable) {\n+ markTaskAsStarted(\"executor runnable\");\n+ super.submitToExecutor(new AbstractRunnable() {\n+ @Override\n+ public void onRejection(Exception e) {\n+ try {\n+ super.onRejection(e);\n+ } finally {\n+ markTaskAsCompleted(\"executor runnable (rejected)\");\n+ }\n+ }\n+\n+ @Override\n+ public void onAfter() {\n+ markTaskAsCompleted(\"executor runnable\");\n+ }\n+\n+ @Override\n+ protected void doRun() throws Exception {\n+ abstractRunnable.run();\n+ }\n+\n+ @Override\n+ public void onFailure(Exception e) {\n+ // we shouldn't really end up here.\n+ throw new AssertionError(\"unexpected error\", e);\n+ }\n+ });\n+ }\n+\n+ private void markTaskAsStarted(String task) {\n+ logger.trace(\"task [{}] started. count [{}]\", task, pendingTasks.incrementAndGet());\n+ }\n \n+ private void markTaskAsCompleted(String task) {\n+ final int left = pendingTasks.decrementAndGet();\n+ logger.trace(\"task [{}] completed. 
count [{}]\", task, left);\n+ if (left == 0) {\n+ allTasksCompleted.countDown();\n+ }\n+ }\n+\n+ @Override\n+ protected TransportResponseHandler<UnicastPingResponse> getPingResponseHandler(PingingRound pingingRound, DiscoveryNode node) {\n+ markTaskAsStarted(\"ping [\" + node + \"]\");\n+ TransportResponseHandler<UnicastPingResponse> original = super.getPingResponseHandler(pingingRound, node);\n+ return new TransportResponseHandler<UnicastPingResponse>() {\n+ @Override\n+ public UnicastPingResponse newInstance() {\n+ return original.newInstance();\n+ }\n+\n+ @Override\n+ public void handleResponse(UnicastPingResponse response) {\n+ original.handleResponse(response);\n+ markTaskAsCompleted(\"ping [\" + node + \"]\");\n+ }\n+\n+ @Override\n+ public void handleException(TransportException exp) {\n+ original.handleException(exp);\n+ markTaskAsCompleted(\"ping [\" + node + \"] (error)\");\n+ }\n+\n+ @Override\n+ public String executor() {\n+ return original.executor();\n+ }\n+ };\n+ }\n }\n \n }", "filename": "core/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java", "status": "modified" }, { "diff": "@@ -68,7 +68,7 @@ public void testPingCollection() {\n Collections.shuffle(pings, random());\n \n ZenPing.PingCollection collection = new ZenPing.PingCollection();\n- collection.addPings(pings);\n+ pings.forEach(collection::addPing);\n \n List<ZenPing.PingResponse> aggregate = collection.toList();\n ", "filename": "core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java", "status": "modified" }, { "diff": "@@ -208,8 +208,8 @@ public long serverOpen() {\n \n @Override\n public NodeChannels getConnection(DiscoveryNode node) {\n- return new NodeChannels(node, new Object[ConnectionProfile.LIGHT_PROFILE.getNumConnections()],\n- ConnectionProfile.LIGHT_PROFILE);\n+ return new NodeChannels(node, new Object[MockTcpTransport.LIGHT_PROFILE.getNumConnections()],\n+ MockTcpTransport.LIGHT_PROFILE);\n }\n };\n DiscoveryNode node = new DiscoveryNode(\"foo\", buildNewFakeTransportAddress(), Version.CURRENT);", "filename": "core/src/test/java/org/elasticsearch/transport/TCPTransportTests.java", "status": "modified" }, { "diff": "@@ -113,24 +113,14 @@ public void testConnectToNodeLight() throws IOException {\n emptyMap(),\n emptySet(),\n Version.CURRENT.minimumCompatibilityVersion());\n- try (Transport.Connection connection = handleA.transportService.openConnection(discoveryNode, ConnectionProfile.LIGHT_PROFILE)){\n+ try (Transport.Connection connection = handleA.transportService.openConnection(discoveryNode, MockTcpTransport.LIGHT_PROFILE)){\n DiscoveryNode connectedNode = handleA.transportService.handshake(connection, timeout);\n assertNotNull(connectedNode);\n // the name and version should be updated\n assertEquals(connectedNode.getName(), \"TS_B\");\n assertEquals(connectedNode.getVersion(), handleB.discoveryNode.getVersion());\n assertFalse(handleA.transportService.nodeConnected(discoveryNode));\n }\n-\n- DiscoveryNode connectedNode =\n- handleA.transportService.connectToNodeAndHandshake(discoveryNode, timeout);\n- assertNotNull(connectedNode);\n-\n- // the name and version should be updated\n- assertEquals(connectedNode.getName(), \"TS_B\");\n- assertEquals(connectedNode.getVersion(), handleB.discoveryNode.getVersion());\n- assertTrue(handleA.transportService.nodeConnected(discoveryNode));\n-\n }\n \n public void testMismatchedClusterName() {\n@@ -145,7 +135,7 @@ public void testMismatchedClusterName() {\n Version.CURRENT.minimumCompatibilityVersion());\n 
IllegalStateException ex = expectThrows(IllegalStateException.class, () -> {\n try (Transport.Connection connection = handleA.transportService.openConnection(discoveryNode,\n- ConnectionProfile.LIGHT_PROFILE)) {\n+ MockTcpTransport.LIGHT_PROFILE)) {\n handleA.transportService.handshake(connection, timeout);\n }\n });\n@@ -166,7 +156,7 @@ public void testIncompatibleVersions() {\n Version.CURRENT.minimumCompatibilityVersion());\n IllegalStateException ex = expectThrows(IllegalStateException.class, () -> {\n try (Transport.Connection connection = handleA.transportService.openConnection(discoveryNode,\n- ConnectionProfile.LIGHT_PROFILE)) {\n+ MockTcpTransport.LIGHT_PROFILE)) {\n handleA.transportService.handshake(connection, timeout);\n }\n });", "filename": "core/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java", "status": "modified" }, { "diff": "@@ -43,7 +43,7 @@\n import java.util.stream.Stream;\n \n import static org.elasticsearch.discovery.zen.UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT;\n-import static org.elasticsearch.discovery.zen.UnicastZenPing.resolveDiscoveryNodes;\n+import static org.elasticsearch.discovery.zen.UnicastZenPing.resolveHostsLists;\n \n /**\n * An implementation of {@link UnicastHostsProvider} that reads hosts/ports\n@@ -97,13 +97,13 @@ public List<DiscoveryNode> buildDynamicNodes() {\n \n final List<DiscoveryNode> discoNodes = new ArrayList<>();\n try {\n- discoNodes.addAll(resolveDiscoveryNodes(\n+ discoNodes.addAll(resolveHostsLists(\n executorService,\n logger,\n hostsList,\n 1,\n transportService,\n- () -> UNICAST_HOST_PREFIX + nodeIdGenerator.incrementAndGet() + \"#\",\n+ UNICAST_HOST_PREFIX,\n resolveTimeout));\n } catch (InterruptedException e) {\n throw new RuntimeException(e);", "filename": "plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java", "status": "modified" }, { "diff": "@@ -33,9 +33,7 @@\n import org.elasticsearch.transport.MockTcpTransport;\n import org.elasticsearch.transport.TransportService;\n import org.junit.After;\n-import org.junit.AfterClass;\n import org.junit.Before;\n-import org.junit.BeforeClass;\n \n import java.io.BufferedWriter;\n import java.io.IOException;\n@@ -44,7 +42,6 @@\n import java.util.Arrays;\n import java.util.Collections;\n import java.util.List;\n-import java.util.concurrent.Executor;\n import java.util.concurrent.ExecutorService;\n import java.util.concurrent.Executors;\n \n@@ -99,13 +96,13 @@ public void testBuildDynamicNodes() throws Exception {\n assertEquals(hostEntries.size() - 1, nodes.size()); // minus 1 because we are ignoring the first line that's a comment\n assertEquals(\"192.168.0.1\", nodes.get(0).getAddress().getAddress());\n assertEquals(9300, nodes.get(0).getAddress().getPort());\n- assertEquals(UNICAST_HOST_PREFIX + \"1#\", nodes.get(0).getId());\n+ assertEquals(UNICAST_HOST_PREFIX + \"192.168.0.1_0#\", nodes.get(0).getId());\n assertEquals(\"192.168.0.2\", nodes.get(1).getAddress().getAddress());\n assertEquals(9305, nodes.get(1).getAddress().getPort());\n- assertEquals(UNICAST_HOST_PREFIX + \"2#\", nodes.get(1).getId());\n+ assertEquals(UNICAST_HOST_PREFIX + \"192.168.0.2:9305_0#\", nodes.get(1).getId());\n assertEquals(\"255.255.23.15\", nodes.get(2).getAddress().getAddress());\n assertEquals(9300, nodes.get(2).getAddress().getPort());\n- assertEquals(UNICAST_HOST_PREFIX + \"3#\", nodes.get(2).getId());\n+ assertEquals(UNICAST_HOST_PREFIX + \"255.255.23.15_0#\", nodes.get(2).getId());\n }\n \n 
public void testEmptyUnicastHostsFile() throws Exception {", "filename": "plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java", "status": "modified" }, { "diff": "@@ -28,10 +28,9 @@\n import org.elasticsearch.discovery.zen.ZenPing;\n \n import java.util.HashMap;\n-import java.util.List;\n import java.util.Map;\n import java.util.Set;\n-import java.util.stream.Collectors;\n+import java.util.function.Consumer;\n \n /**\n * A {@link ZenPing} implementation which returns results based on an static in-memory map. This allows pinging\n@@ -62,7 +61,7 @@ public void start(PingContextProvider contextProvider) {\n }\n \n @Override\n- public void ping(PingListener listener, TimeValue timeout) {\n+ public void ping(Consumer<PingCollection> resultsConsumer, TimeValue timeout) {\n logger.info(\"pinging using mock zen ping\");\n synchronized (activeNodesPerCluster) {\n Set<MockZenPing> activeNodes = getActiveNodesForCurrentCluster();\n@@ -76,11 +75,12 @@ public void ping(PingListener listener, TimeValue timeout) {\n activeNodes = getActiveNodesForCurrentCluster();\n }\n lastDiscoveredPings = activeNodes;\n- List<PingResponse> responseList = activeNodes.stream()\n+ PingCollection pingCollection = new PingCollection();\n+ activeNodes.stream()\n .filter(p -> p != this) // remove this as pings are not expected to return the local node\n .map(MockZenPing::getPingResponse)\n- .collect(Collectors.toList());\n- listener.onPing(responseList);\n+ .forEach(pingCollection::addPing);\n+ resultsConsumer.accept(pingCollection);\n }\n }\n ", "filename": "test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java", "status": "modified" }, { "diff": "@@ -29,9 +29,7 @@\n import org.elasticsearch.action.ActionListenerResponseHandler;\n import org.elasticsearch.action.support.PlainActionFuture;\n import org.elasticsearch.cluster.node.DiscoveryNode;\n-import org.elasticsearch.common.io.stream.InputStreamStreamInput;\n import org.elasticsearch.common.io.stream.NamedWriteableRegistry;\n-import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n import org.elasticsearch.common.logging.Loggers;\n@@ -54,8 +52,6 @@\n import org.junit.Before;\n \n import java.io.IOException;\n-import java.io.InputStream;\n-import java.io.OutputStream;\n import java.io.UncheckedIOException;\n import java.net.InetAddress;\n import java.net.InetSocketAddress;\n@@ -1358,7 +1354,7 @@ public void handleException(TransportException exp) {\n // all is well\n }\n \n- try (Transport.Connection connection = serviceB.openConnection(nodeA, ConnectionProfile.LIGHT_PROFILE)){\n+ try (Transport.Connection connection = serviceB.openConnection(nodeA, MockTcpTransport.LIGHT_PROFILE)){\n serviceB.handshake(connection, 100);\n fail(\"exception should be thrown\");\n } catch (IllegalStateException e) {\n@@ -1416,7 +1412,7 @@ public void handleException(TransportException exp) {\n // all is well\n }\n \n- try (Transport.Connection connection = serviceB.openConnection(nodeA, ConnectionProfile.LIGHT_PROFILE)){\n+ try (Transport.Connection connection = serviceB.openConnection(nodeA, MockTcpTransport.LIGHT_PROFILE)){\n serviceB.handshake(connection, 100);\n fail(\"exception should be thrown\");\n } catch (IllegalStateException e) {", "filename": "test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java", "status": "modified" }, { "diff": 
"@@ -66,6 +66,23 @@\n */\n public class MockTcpTransport extends TcpTransport<MockTcpTransport.MockChannel> {\n \n+ /**\n+ * A pre-built light connection profile that shares a single connection across all\n+ * types.\n+ */\n+ public static final ConnectionProfile LIGHT_PROFILE;\n+\n+ static {\n+ ConnectionProfile.Builder builder = new ConnectionProfile.Builder();\n+ builder.addConnections(1,\n+ TransportRequestOptions.Type.BULK,\n+ TransportRequestOptions.Type.PING,\n+ TransportRequestOptions.Type.RECOVERY,\n+ TransportRequestOptions.Type.REG,\n+ TransportRequestOptions.Type.STATE);\n+ LIGHT_PROFILE = builder.build();\n+ }\n+\n private final ExecutorService executor;\n private final Version mockVersion;\n \n@@ -159,7 +176,7 @@ private void readMessage(MockChannel mockChannel, StreamInput input) throws IOEx\n @Override\n protected NodeChannels connectToChannels(DiscoveryNode node, ConnectionProfile profile) throws IOException {\n final MockChannel[] mockChannels = new MockChannel[1];\n- final NodeChannels nodeChannels = new NodeChannels(node, mockChannels, ConnectionProfile.LIGHT_PROFILE); // we always use light here\n+ final NodeChannels nodeChannels = new NodeChannels(node, mockChannels, LIGHT_PROFILE); // we always use light here\n boolean success = false;\n final Socket socket = new Socket();\n try {", "filename": "test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: 2.3.4\n**JVM version**: 1.8.0_91\n**OS version**: RedHat 6.5\n\nWe are using the TribeNode feature to enable search across a number of geographically distributed ElasticSearch clusters. Occasionally when we take one of these clusters completely offline, we find that our TribeNode hits the following exception:\n\n```\njava.lang.OutOfMemoryError: unable to create new native thread\n at java.lang.Thread.start0(Native Method)\n at java.lang.Thread.start(Thread.java:714)\n at java.util.concurrent.ThreadPoolExecutor.addWorker(ThreadPoolExecutor.java:950)\n at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1368)\n at org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor.execute(EsThreadPoolExecutor.java:85)\n at org.elasticsearch.threadpool.ThreadPool$ThreadedRunnable.run(ThreadPool.java:676)\n at org.elasticsearch.threadpool.ThreadPool$LoggingRunnable.run(ThreadPool.java:640)\n at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)\n at java.util.concurrent.FutureTask.run(FutureTask.java:266)\n at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$201(ScheduledThreadPoolExecutor.java:180)\n at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:293)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\n```\n\nThis exception is thrown because of thread exhaustion due to the TribeNode creating a new thread every couple of seconds. Below is the stack trace of the leaked threads:\n\n```\njava.util.concurrent.locks.LockSupport.park(LockSupport.java:186)\n java.util.concurrent.locks.AbstractQueuedSynchronizer.parkAndCheckInterrupt(AbstractQueuedSynchronizer.java:834)\n java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireQueued(AbstractQueuedSynchronizer.java:867)\n java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:1197)\n java.util.concurrent.locks.ReentrantLock$NonfairSync.lock(ReentrantLock.java:214)\n java.util.concurrent.locks.ReentrantLock.lock(ReentrantLock.java:290)\n org.elasticsearch.common.util.concurrent.KeyedLock.acquire(KeyedLock.java:75)\n org.elasticsearch.transport.netty.NettyTransport.disconnectFromNode(NettyTransport.java:1063)\n org.elasticsearch.transport.TransportService.disconnectFromNode(TransportService.java:274)\n org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing$2$1.doRun(UnicastZenPing.java:258)\n org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\n java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n java.lang.Thread.run(Thread.java:745)\n```\n\n**Steps to reproduce**:\n Create TribeNode configuration where one cluster is offline. Its not enough that the processes are shutdown and the machine is online, the nodes specified in the discovery.zen.ping.unicast.hosts for the offline cluster must be offline and not respond to ping/connection attempts. 
Here is a simple configuration I was able to use to reproduce the problem.\n\n```\n\n---\ncluster.name: \"thread-leak-test\"\nnode.name: \"thread-leak-node\"\nhttp.port: \"9201\"\nhttp.host: \"127.0.0.1\"\ntribe:\n online-cluster:\n cluster.name: \"online-cluster\"\n discovery.zen.ping.unicast.hosts:\n - \"localhost\"\n offline-cluster:\n cluster.name: \"offline-cluster\"\n discovery.zen.ping.unicast.hosts:\n - \"10.10.10.10\"\n```\n\nStart the Tribe node. Observe that the number of threads continue to grow unbounded (`ps -m <pid> | wc -l`) until the OutOfMemoryError: unable to create new native thread exceptions are thrown.\n\nThis issue appears similar to the problem described in #8057.\n", "comments": [ { "body": "@escheie Thanks for reporting! I reproduced it and will come back with a potential fix.\n", "created_at": "2016-08-01T11:31:50Z" }, { "body": "Thanks tlrx for looking into the issue. I've found that setting `discovery.zen.ping.unicast.concurrent_connects` to 1 (default value is 10) limits the number of threads that pile up as the lock gets released to the threads waiting on disconnect every 30s between connection timeouts. When it was 10, the connect threads could theoretically hold the lock forever, preventing the disconnect threads from ever getting a chance to complete.\n", "created_at": "2016-08-05T00:23:25Z" }, { "body": "@escheie True. But I think the comment made is the #19719 makes sense and changing the connection timeout for pings will help. But I still need to think about it again.\n", "created_at": "2016-08-05T07:14:44Z" }, { "body": "With more testing overnight, I found that setting the \"discovery.zen.ping.unicast.concurrent_connects\" to 1 only works if TRACE logging is enabled for discovery. Seems that little extra time the connect thread spends logging gives the other threads performing the disconnect a chance to get the lock. Would a shorter connect timeout help if it is still more than the interval that connects are attempted which appear to be every 1.5 seconds? \n\nLooks like the KeyedLock used by NettyTransport in 2.x and TcpTransport in 5.x supports a fair option so that threads are able to acquire the lock in the order they request it. This fair option is currently set to false. If threads are able to obtain the lock in they order they request it, then that should ensure the disconnect threads get a chance to run between connection attempts. I suppose enabling the fair option though would result in a significant performance penalty, so probably not an option.\n", "created_at": "2016-08-05T17:49:02Z" }, { "body": "I've confirmed that enabling the \"fair\" flag in the KeyedLock does prevent the number of threads from growing unbounded. The maximum number of threads that pile up equals (discovery.zen.ping.unicast.concurrent_connects \\* connect_timeout)/(disconnect frequency) = 10*30/3 = 100. This number can be reduced by lowering discovery.zen.ping.unicast.concurrent_connects in the configuration or if the connect_timeout is also lowered as proposed. \n\nSince it looks like the KeyedLock is only used during connect and disconnect and not for connection lookup, enabling the fair flag may not impact performance as I previously feared.\n", "created_at": "2016-08-05T19:18:24Z" }, { "body": "Thanks @escheie ! 
Your effort and investigation are great.\n\nI do feel like the issue happens because we try to disconnect from nodes even if we never managed to connect to them, and `UnicastZenPing` blindly piles up threads to disconnect from them (and these threads try to acquire a lock and slow down ping threads too).\n\nI proposed a new fix in #19719; I'm wondering whether it works for you too.\n", "created_at": "2016-08-24T10:34:27Z" } ], "number": 19370, "title": "Thread leak in TribeNode when a cluster is offline" }
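The fairness discussion in the issue above is easier to follow with a small sketch. The class below is a hypothetical, stripped-down keyed lock, not Elasticsearch's `KeyedLock` (it omits lock cleanup and reference counting); its constructor flag simply maps onto `ReentrantLock` fairness. With `fair = true`, disconnect tasks queued on a per-address lock acquire it in FIFO order between connect attempts, which is consistent with the reporter's observation that the thread count stays bounded at roughly `concurrent_connects * connect_timeout / disconnect_interval` instead of growing without limit.

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReentrantLock;

/**
 * Simplified illustration of a per-key lock with a fairness flag. One lock exists per key
 * (e.g. per transport address); when fair is true, waiting threads acquire each lock in
 * FIFO order, so queued disconnect tasks get a turn between long-running connect attempts
 * instead of being starved indefinitely.
 */
final class SimpleKeyedLock<K> {

    private final ConcurrentMap<K, ReentrantLock> locks = new ConcurrentHashMap<>();
    private final boolean fair;

    SimpleKeyedLock(boolean fair) {
        this.fair = fair;
    }

    void runLocked(K key, Runnable action) {
        // lazily create the per-key lock; the fairness flag controls acquisition order
        ReentrantLock lock = locks.computeIfAbsent(key, k -> new ReentrantLock(fair));
        lock.lock();
        try {
            action.run();
        } finally {
            lock.unlock();
        }
    }
}
```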
{ "body": "The `UnicastZenPing` shows it's age and is the result of many small changes. The current state of affairs is confusing and is hard to reason about. This PR cleans it up (while following the same original intentions). Highlights of the changes are:\r\n\r\n1) Clear 3 round flow - no interleaving of scheduling.\r\n2) The previous implementation did a best effort attempt to wait for ongoing pings to be sent and completed. The pings were guaranteed to complete because each used the total ping duration as a timeout. This did make it hard to reason about the total ping duration and the flow of the code. All of this is removed now and ping should just complete within the given duration or not be counted (note that it was very handy for testing, but I move the needed sync logic to the test).\r\n3) Because of (2) the pinging scheduling changed a bit, to give a chance for the last round to complete. We now ping at the beginning, 1/3 and 2/3 of the duration.\r\n4) To offset for (3) a bit, incoming ping requests are now added to on going ping collections.\r\n5) UnicastZenPing never establishes full blown connections (but does reuse them if there). Relates to #22120\r\n6) Discovery host providers are only used once per pinging round. Closes #21739\r\n7) Usage of the ability to open a connection without connecting to a node ( #22194 ) and shorter connection timeouts helps with connections piling up. Closes #19370\r\n8) Beefed up testing and sped them up.\r\n9) removed light profile from production code ", "number": 22277, "review_comments": [ { "body": "💥 ", "created_at": "2016-12-20T12:11:49Z" }, { "body": "please restore the interrupt status here?", "created_at": "2016-12-20T12:14:46Z" }, { "body": "maybe `(e instanceof AlreadyClosedException) == false`? ", "created_at": "2016-12-20T12:15:43Z" }, { "body": "we call it `ensureOpen` everywhere can we do the same here?", "created_at": "2016-12-20T12:17:30Z" }, { "body": "hmm that looks weird. Can we maybe use a `KeyedLock` when we open the connections with IP and port or something like this?", "created_at": "2016-12-20T12:19:06Z" }, { "body": "use `localNode.equals(incomingObject)` we know that `localnode` is non-null", "created_at": "2016-12-20T12:19:42Z" }, { "body": "if you wanna be more explicit you can use `Function::identity` to make sure it's not a typo", "created_at": "2016-12-20T12:27:05Z" }, { "body": "maybe log this?", "created_at": "2016-12-20T12:28:40Z" }, { "body": "do we need this trace log here and if so can we fix it to say `temporarily` or something like this", "created_at": "2016-12-20T12:30:59Z" }, { "body": "just flip it then you don't need to negate", "created_at": "2016-12-20T12:39:01Z" }, { "body": "if you keep the trace maybe use a logging guard here?", "created_at": "2016-12-20T12:39:18Z" }, { "body": "I wonder if we should do this. I think we should move the `LIGHT_PROFILE` into tests somewhere and then require every special use to build it's own. The problem I have here is that the `getLightProfileWithTimeout` shares one connection across all uses. I think in the case of ping we should only use 1 connection for `PING` and 0 for the others. that will cause an exception if it's used in a wrong context. makes sense?", "created_at": "2016-12-20T12:44:44Z" }, { "body": "++", "created_at": "2016-12-20T12:44:50Z" }, { "body": "this is how it was and we do throw an exception, thus processing the interrupt?", "created_at": "2016-12-20T15:15:23Z" }, { "body": "yep. 
This morphed - I used to have a log there but got annoyed with it (just noise). ", "created_at": "2016-12-20T15:16:08Z" }, { "body": "good one. Will change.", "created_at": "2016-12-20T15:17:40Z" }, { "body": "yeah, I wanted to have the simplest construct as it was a rare collision. With the latest code I actually think it's impossible (I dedup on addresses and the connection are private to the pinging round). Will remove.", "created_at": "2016-12-20T15:19:15Z" }, { "body": "+++ (although it's not optional on the wire)", "created_at": "2016-12-20T15:19:52Z" }, { "body": "the compiler can't figure out the implied types with the identity. I changed it to be more verbose so it won't look like a typo", "created_at": "2016-12-20T15:26:06Z" }, { "body": "sure", "created_at": "2016-12-20T15:27:15Z" }, { "body": "I adapted the log message", "created_at": "2016-12-20T15:27:59Z" }, { "body": "sure thing, will add.", "created_at": "2016-12-20T15:31:56Z" }, { "body": "sure", "created_at": "2016-12-20T16:36:36Z" }, { "body": "I tried to implement your suggestion and I think it looks good. will push shortly.", "created_at": "2016-12-20T19:43:24Z" }, { "body": "turns out we do need this protection or something similar. I took another approach, which I think you'd like better.", "created_at": "2016-12-20T19:44:56Z" }, { "body": "This logging was initially added to just `testSimplePings` to chase a race. The race has not reproduced since adding this logging. I think that we should drop the logging and and then address if the race comes back since you've changed how these things are handled.", "created_at": "2016-12-20T20:37:17Z" }, { "body": "removed. ", "created_at": "2016-12-20T20:47:55Z" }, { "body": "maybe use `computeIfAbsent()`?", "created_at": "2016-12-21T07:00:52Z" }, { "body": "you didn't like `Function.identity()` ?", "created_at": "2016-12-21T07:01:58Z" }, { "body": "`Arrays.asStream(response.pingResponses)` would not materialize it", "created_at": "2016-12-21T07:02:40Z" } ], "title": "Simplify Unicast Zen Ping" }
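Highlight (3) of the PR body — pinging at the start, at 1/3, and at 2/3 of the round duration, then closing the round — is sketched below with a plain `ScheduledExecutorService` and hypothetical names (`sendPings`, `finishRound`). The actual implementation in the diff that follows uses the node's `ThreadPool` and a `PingingRound`, but the timing layout is the same.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

/**
 * Minimal sketch of the three-round ping schedule: send a batch of pings immediately,
 * again at 1/3 and 2/3 of the round duration, and close the round (deliver the collected
 * responses) once the full duration has elapsed.
 */
public class PingScheduleSketch {
    public static void main(String[] args) throws InterruptedException {
        final long durationMillis = 3_000; // stand-in for the configured ping duration
        final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

        final Runnable sendPings = () -> System.out.println("sending a batch of pings");
        final Runnable finishRound = () -> System.out.println("closing round, handing results to consumer");

        scheduler.execute(sendPings);                                                  // t = 0
        scheduler.schedule(sendPings, durationMillis / 3, TimeUnit.MILLISECONDS);      // t = 1/3 duration
        scheduler.schedule(sendPings, durationMillis / 3 * 2, TimeUnit.MILLISECONDS);  // t = 2/3 duration
        scheduler.schedule(finishRound, durationMillis, TimeUnit.MILLISECONDS);        // t = full duration

        Thread.sleep(durationMillis + 100); // let the final task run before shutting down
        scheduler.shutdown();
    }
}
```

Sending the last batch at 2/3 of the duration leaves the final third for responses to arrive before the round is closed, which is the trade-off the PR body describes in highlight (3).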
{ "commits": [ { "message": "initial implementation" }, { "message": "speed up pinging tests" }, { "message": "linting" }, { "message": "fix FileBasedUnicastHostsProviderTests" }, { "message": "Merge remote-tracking branch 'upstream/master' into unicast_zen_cleanup" }, { "message": "fix racing conditions in waiting for completeness" }, { "message": "better dedupping en using non-async and exact counters" }, { "message": "add a test for remembering incoming pings" }, { "message": "use light connections with the right timeout" }, { "message": "Merge remote-tracking branch 'upstream/master' into unicast_zen_cleanup" }, { "message": "feedback" }, { "message": "feedback" }, { "message": "Merge remote-tracking branch 'upstream/master' into unicast_zen_cleanup" }, { "message": "remove LIGHT_PROFILE in favor of dedicated single channel profiles" }, { "message": "TransportClientNodesService is not ready yet for a single channel type" }, { "message": "remove trace logging" }, { "message": "feedback" } ], "files": [ { "diff": "@@ -101,6 +101,21 @@ final class TransportClientNodesService extends AbstractComponent implements Clo\n \n private final TransportClient.HostFailureListener hostFailureListener;\n \n+ // TODO: migrate this to use low level connections and single type channels\n+ /** {@link ConnectionProfile} to use when to connecting to the listed nodes and doing a liveness check */\n+ private static final ConnectionProfile LISTED_NODES_PROFILE;\n+\n+ static {\n+ ConnectionProfile.Builder builder = new ConnectionProfile.Builder();\n+ builder.addConnections(1,\n+ TransportRequestOptions.Type.BULK,\n+ TransportRequestOptions.Type.PING,\n+ TransportRequestOptions.Type.RECOVERY,\n+ TransportRequestOptions.Type.REG,\n+ TransportRequestOptions.Type.STATE);\n+ LISTED_NODES_PROFILE = builder.build();\n+ }\n+\n TransportClientNodesService(Settings settings, TransportService transportService,\n ThreadPool threadPool, TransportClient.HostFailureListener hostFailureListener) {\n super(settings);\n@@ -389,8 +404,8 @@ protected void doSample() {\n if (!transportService.nodeConnected(listedNode)) {\n try {\n // its a listed node, light connect to it...\n- logger.trace(\"connecting to listed node (light) [{}]\", listedNode);\n- transportService.connectToNode(listedNode, ConnectionProfile.LIGHT_PROFILE);\n+ logger.trace(\"connecting to listed node [{}]\", listedNode);\n+ transportService.connectToNode(listedNode, LISTED_NODES_PROFILE);\n } catch (Exception e) {\n logger.info(\n (Supplier<?>)\n@@ -470,7 +485,7 @@ public void run() {\n } else {\n // its a listed node, light connect to it...\n logger.trace(\"connecting to listed node (light) [{}]\", listedNode);\n- transportService.connectToNode(listedNode, ConnectionProfile.LIGHT_PROFILE);\n+ transportService.connectToNode(listedNode, LISTED_NODES_PROFILE);\n }\n } catch (Exception e) {\n logger.debug(", "filename": "core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java", "status": "modified" }, { "diff": "@@ -24,7 +24,6 @@\n import org.elasticsearch.cluster.ClusterState;\n import org.elasticsearch.cluster.node.DiscoveryNode;\n import org.elasticsearch.common.component.AbstractComponent;\n-import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Setting;\n import org.elasticsearch.common.settings.Setting.Property;\n import org.elasticsearch.common.settings.Settings;\n@@ -174,7 +173,7 @@ public void logMinimumMasterNodesWarningIfNecessary(ClusterState oldState, Clust\n * Returns the given nodes 
sorted by likelihood of being elected as master, most likely first.\n * Non-master nodes are not removed but are rather put in the end\n */\n- public static List<DiscoveryNode> sortByMasterLikelihood(Iterable<DiscoveryNode> nodes) {\n+ static List<DiscoveryNode> sortByMasterLikelihood(Iterable<DiscoveryNode> nodes) {\n ArrayList<DiscoveryNode> sortedNodes = CollectionUtils.iterableAsArrayList(nodes);\n CollectionUtil.introSort(sortedNodes, ElectMasterService::compareNodes);\n return sortedNodes;", "filename": "core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java", "status": "modified" }, { "diff": "@@ -23,13 +23,12 @@\n import org.apache.logging.log4j.Logger;\n import org.apache.logging.log4j.message.ParameterizedMessage;\n import org.apache.logging.log4j.util.Supplier;\n-import org.elasticsearch.ElasticsearchException;\n+import org.apache.lucene.store.AlreadyClosedException;\n+import org.apache.lucene.util.IOUtils;\n import org.elasticsearch.Version;\n import org.elasticsearch.cluster.ClusterName;\n import org.elasticsearch.cluster.node.DiscoveryNode;\n import org.elasticsearch.cluster.node.DiscoveryNodes;\n-import org.elasticsearch.common.Nullable;\n-import org.elasticsearch.common.UUIDs;\n import org.elasticsearch.common.component.AbstractComponent;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n@@ -44,10 +43,14 @@\n import org.elasticsearch.common.util.concurrent.AbstractRunnable;\n import org.elasticsearch.common.util.concurrent.ConcurrentCollections;\n import org.elasticsearch.common.util.concurrent.EsExecutors;\n-import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;\n+import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor;\n+import org.elasticsearch.common.util.concurrent.KeyedLock;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.ConnectTransportException;\n+import org.elasticsearch.transport.ConnectionProfile;\n+import org.elasticsearch.transport.NodeNotConnectedException;\n import org.elasticsearch.transport.RemoteTransportException;\n+import org.elasticsearch.transport.Transport.Connection;\n import org.elasticsearch.transport.TransportChannel;\n import org.elasticsearch.transport.TransportException;\n import org.elasticsearch.transport.TransportRequest;\n@@ -60,8 +63,8 @@\n import java.io.IOException;\n import java.util.ArrayList;\n import java.util.Arrays;\n-import java.util.Collection;\n-import java.util.HashSet;\n+import java.util.Collections;\n+import java.util.HashMap;\n import java.util.Iterator;\n import java.util.List;\n import java.util.Locale;\n@@ -70,18 +73,17 @@\n import java.util.Queue;\n import java.util.Set;\n import java.util.concurrent.Callable;\n-import java.util.concurrent.CountDownLatch;\n import java.util.concurrent.ExecutionException;\n import java.util.concurrent.ExecutorService;\n import java.util.concurrent.Future;\n-import java.util.concurrent.RejectedExecutionException;\n import java.util.concurrent.ThreadFactory;\n import java.util.concurrent.TimeUnit;\n import java.util.concurrent.atomic.AtomicBoolean;\n import java.util.concurrent.atomic.AtomicInteger;\n-import java.util.concurrent.atomic.AtomicReference;\n+import java.util.function.Consumer;\n import java.util.function.Function;\n import java.util.stream.Collectors;\n+import java.util.stream.Stream;\n \n import static java.util.Collections.emptyList;\n import static java.util.Collections.emptyMap;\n@@ -116,22 +118,19 @@ public class 
UnicastZenPing extends AbstractComponent implements ZenPing {\n \n private volatile PingContextProvider contextProvider;\n \n- private final AtomicInteger pingHandlerIdGenerator = new AtomicInteger();\n+ private final AtomicInteger pingingRoundIdGenerator = new AtomicInteger();\n \n- // used to generate unique ids for nodes/address we temporarily connect to\n- private final AtomicInteger unicastNodeIdGenerator = new AtomicInteger();\n-\n- // used as a node id prefix for nodes/address we temporarily connect to\n+ // used as a node id prefix for configured unicast host nodes/address\n private static final String UNICAST_NODE_PREFIX = \"#zen_unicast_\";\n \n- private final Map<Integer, SendPingsHandler> receivedResponses = newConcurrentMap();\n+ private final Map<Integer, PingingRound> activePingingRounds = newConcurrentMap();\n \n // a list of temporal responses a node will return for a request (holds responses from other nodes)\n private final Queue<PingResponse> temporalResponses = ConcurrentCollections.newQueue();\n \n private final UnicastHostsProvider hostsProvider;\n \n- private final ExecutorService unicastZenPingExecutorService;\n+ protected final EsThreadPoolExecutor unicastZenPingExecutorService;\n \n private final TimeValue resolveTimeout;\n \n@@ -146,15 +145,14 @@ public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService\n this.hostsProvider = unicastHostsProvider;\n \n this.concurrentConnects = DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings);\n- final List<String> hosts = DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.get(settings);\n- if (hosts.isEmpty()) {\n+ if (DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.exists(settings)) {\n+ configuredHosts = DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.get(settings);\n+ // we only limit to 1 addresses, makes no sense to ping 100 ports\n+ limitPortCounts = LIMIT_FOREIGN_PORTS_COUNT;\n+ } else {\n // if unicast hosts are not specified, fill with simple defaults on the local machine\n configuredHosts = transportService.getLocalAddresses();\n limitPortCounts = LIMIT_LOCAL_PORTS_COUNT;\n- } else {\n- configuredHosts = hosts;\n- // we only limit to 1 addresses, makes no sense to ping 100 ports\n- limitPortCounts = LIMIT_FOREIGN_PORTS_COUNT;\n }\n resolveTimeout = DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT.get(settings);\n logger.debug(\n@@ -164,7 +162,7 @@ public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService\n resolveTimeout);\n \n transportService.registerRequestHandler(ACTION_NAME, UnicastPingRequest::new, ThreadPool.Names.SAME,\n- new UnicastPingRequestHandler());\n+ new UnicastPingRequestHandler());\n \n final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings, \"[unicast_connect]\");\n unicastZenPingExecutorService = EsExecutors.newScaling(\n@@ -186,23 +184,23 @@ public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService\n * @param hosts the hosts to resolve\n * @param limitPortCounts the number of ports to resolve (should be 1 for non-local transport)\n * @param transportService the transport service\n- * @param idGenerator the generator to supply unique ids for each discovery node\n+ * @param nodeId_prefix a prefix to use for node ids\n * @param resolveTimeout the timeout before returning from hostname lookups\n * @return a list of discovery nodes with resolved transport addresses\n */\n- public static List<DiscoveryNode> resolveDiscoveryNodes(\n+ public static List<DiscoveryNode> resolveHostsLists(\n final ExecutorService 
executorService,\n final Logger logger,\n final List<String> hosts,\n final int limitPortCounts,\n final TransportService transportService,\n- final Supplier<String> idGenerator,\n+ final String nodeId_prefix,\n final TimeValue resolveTimeout) throws InterruptedException {\n Objects.requireNonNull(executorService);\n Objects.requireNonNull(logger);\n Objects.requireNonNull(hosts);\n Objects.requireNonNull(transportService);\n- Objects.requireNonNull(idGenerator);\n+ Objects.requireNonNull(nodeId_prefix);\n Objects.requireNonNull(resolveTimeout);\n if (resolveTimeout.nanos() < 0) {\n throw new IllegalArgumentException(\"resolve timeout must be non-negative but was [\" + resolveTimeout + \"]\");\n@@ -211,7 +209,7 @@ public static List<DiscoveryNode> resolveDiscoveryNodes(\n final List<Callable<TransportAddress[]>> callables =\n hosts\n .stream()\n- .map(hn -> (Callable<TransportAddress[]>)() -> transportService.addressesFromString(hn, limitPortCounts))\n+ .map(hn -> (Callable<TransportAddress[]>) () -> transportService.addressesFromString(hn, limitPortCounts))\n .collect(Collectors.toList());\n final List<Future<TransportAddress[]>> futures =\n executorService.invokeAll(callables, resolveTimeout.nanos(), TimeUnit.NANOSECONDS);\n@@ -226,11 +224,11 @@ public static List<DiscoveryNode> resolveDiscoveryNodes(\n try {\n final TransportAddress[] addresses = future.get();\n logger.trace(\"resolved host [{}] to {}\", hostname, addresses);\n- for (final TransportAddress address : addresses) {\n+ for (int addressId = 0; addressId < addresses.length; addressId++) {\n discoveryNodes.add(\n new DiscoveryNode(\n- idGenerator.get(),\n- address,\n+ nodeId_prefix + hostname + \"_\" + addressId + \"#\",\n+ addresses[addressId],\n emptyMap(),\n emptySet(),\n Version.CURRENT.minimumCompatibilityVersion()));\n@@ -249,8 +247,8 @@ public static List<DiscoveryNode> resolveDiscoveryNodes(\n \n @Override\n public void close() {\n- ThreadPool.terminate(unicastZenPingExecutorService, 0, TimeUnit.SECONDS);\n- Releasables.close(receivedResponses.values());\n+ ThreadPool.terminate(unicastZenPingExecutorService, 10, TimeUnit.SECONDS);\n+ Releasables.close(activePingingRounds.values());\n closed = true;\n }\n \n@@ -266,106 +264,106 @@ public void clearTemporalResponses() {\n temporalResponses.clear();\n }\n \n- // test only\n- Collection<PingResponse> pingAndWait(TimeValue duration) {\n- final AtomicReference<Collection<PingResponse>> response = new AtomicReference<>();\n- final CountDownLatch latch = new CountDownLatch(1);\n- ping(pings -> {\n- response.set(pings);\n- latch.countDown();\n- }, duration);\n- try {\n- latch.await();\n- return response.get();\n- } catch (InterruptedException e) {\n- return null;\n- }\n- }\n-\n /**\n- * Sends three rounds of pings notifying the specified {@link PingListener} when pinging is complete. Pings are sent after resolving\n+ * Sends three rounds of pings notifying the specified {@link Consumer} when pinging is complete. Pings are sent after resolving\n * configured unicast hosts to their IP address (subject to DNS caching within the JVM). A batch of pings is sent, then another batch\n * of pings is sent at half the specified {@link TimeValue}, and then another batch of pings is sent at the specified {@link TimeValue}.\n * The pings that are sent carry a timeout of 1.25 times the specified {@link TimeValue}. 
When pinging each node, a connection and\n * handshake is performed, with a connection timeout of the specified {@link TimeValue}.\n *\n- * @param listener the callback when pinging is complete\n- * @param duration the timeout for various components of the pings\n+ * @param resultsConsumer the callback when pinging is complete\n+ * @param duration the timeout for various components of the pings\n */\n @Override\n- public void ping(final PingListener listener, final TimeValue duration) {\n- final List<DiscoveryNode> resolvedDiscoveryNodes;\n+ public void ping(final Consumer<PingCollection> resultsConsumer, final TimeValue duration) {\n+ ping(resultsConsumer, duration, duration);\n+ }\n+\n+ /**\n+ * a variant of {@link #ping(Consumer, TimeValue)}, but allows separating the scheduling duration\n+ * from the duration used for request level time outs. This is useful for testing\n+ */\n+ protected void ping(final Consumer<PingCollection> resultsConsumer,\n+ final TimeValue scheduleDuration,\n+ final TimeValue requestDuration) {\n+ final List<DiscoveryNode> seedNodes;\n try {\n- resolvedDiscoveryNodes = resolveDiscoveryNodes(\n+ seedNodes = resolveHostsLists(\n unicastZenPingExecutorService,\n logger,\n configuredHosts,\n limitPortCounts,\n transportService,\n- () -> UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + \"#\",\n+ UNICAST_NODE_PREFIX,\n resolveTimeout);\n } catch (InterruptedException e) {\n throw new RuntimeException(e);\n }\n- final SendPingsHandler sendPingsHandler = new SendPingsHandler(pingHandlerIdGenerator.incrementAndGet());\n- try {\n- receivedResponses.put(sendPingsHandler.id(), sendPingsHandler);\n- try {\n- sendPings(duration, null, sendPingsHandler, resolvedDiscoveryNodes);\n- } catch (RejectedExecutionException e) {\n- logger.debug(\"Ping execution rejected\", e);\n- // The RejectedExecutionException can come from the fact unicastZenPingExecutorService is at its max down in sendPings\n- // But don't bail here, we can retry later on after the send ping has been scheduled.\n+ seedNodes.addAll(hostsProvider.buildDynamicNodes());\n+ final DiscoveryNodes nodes = contextProvider.nodes();\n+ // add all possible master nodes that were active in the last known cluster configuration\n+ for (ObjectCursor<DiscoveryNode> masterNode : nodes.getMasterNodes().values()) {\n+ seedNodes.add(masterNode.value);\n+ }\n+\n+ final ConnectionProfile connectionProfile =\n+ ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, requestDuration, requestDuration);\n+ final PingingRound pingingRound = new PingingRound(pingingRoundIdGenerator.incrementAndGet(), seedNodes, resultsConsumer,\n+ nodes.getLocalNode(), connectionProfile);\n+ activePingingRounds.put(pingingRound.id(), pingingRound);\n+ final AbstractRunnable pingSender = new AbstractRunnable() {\n+ @Override\n+ public void onFailure(Exception e) {\n+ if (e instanceof AlreadyClosedException == false) {\n+ logger.warn(\"unexpected error while pinging\", e);\n+ }\n }\n \n- threadPool.schedule(TimeValue.timeValueMillis(duration.millis() / 2), ThreadPool.Names.GENERIC, new AbstractRunnable() {\n- @Override\n- protected void doRun() {\n- sendPings(duration, null, sendPingsHandler, resolvedDiscoveryNodes);\n- threadPool.schedule(TimeValue.timeValueMillis(duration.millis() / 2), ThreadPool.Names.GENERIC, new AbstractRunnable() {\n- @Override\n- protected void doRun() throws Exception {\n- sendPings(duration, TimeValue.timeValueMillis(duration.millis() / 2), sendPingsHandler, resolvedDiscoveryNodes);\n- 
sendPingsHandler.close();\n- listener.onPing(sendPingsHandler.pingCollection().toList());\n- for (DiscoveryNode node : sendPingsHandler.nodeToDisconnect) {\n- logger.trace(\"[{}] disconnecting from {}\", sendPingsHandler.id(), node);\n- transportService.disconnectFromNode(node);\n- }\n- }\n+ @Override\n+ protected void doRun() throws Exception {\n+ sendPings(requestDuration, pingingRound);\n+ }\n+ };\n+ threadPool.generic().execute(pingSender);\n+ threadPool.schedule(TimeValue.timeValueMillis(scheduleDuration.millis() / 3), ThreadPool.Names.GENERIC, pingSender);\n+ threadPool.schedule(TimeValue.timeValueMillis(scheduleDuration.millis() / 3 * 2), ThreadPool.Names.GENERIC, pingSender);\n+ threadPool.schedule(scheduleDuration, ThreadPool.Names.GENERIC, new AbstractRunnable() {\n+ @Override\n+ protected void doRun() throws Exception {\n+ finishPingingRound(pingingRound);\n+ }\n \n- @Override\n- public void onFailure(Exception e) {\n- logger.debug(\"Ping execution failed\", e);\n- sendPingsHandler.close();\n- }\n- });\n- }\n+ @Override\n+ public void onFailure(Exception e) {\n+ logger.warn(\"unexpected error while finishing pinging round\", e);\n+ }\n+ });\n+ }\n \n- @Override\n- public void onFailure(Exception e) {\n- logger.debug(\"Ping execution failed\", e);\n- sendPingsHandler.close();\n- }\n- });\n- } catch (EsRejectedExecutionException ex) { // TODO: remove this once ScheduledExecutor has support for AbstractRunnable\n- sendPingsHandler.close();\n- // we are shutting down\n- } catch (Exception e) {\n- sendPingsHandler.close();\n- throw new ElasticsearchException(\"Ping execution failed\", e);\n- }\n+ // for testing\n+ protected void finishPingingRound(PingingRound pingingRound) {\n+ pingingRound.close();\n }\n \n- class SendPingsHandler implements Releasable {\n+ protected class PingingRound implements Releasable {\n private final int id;\n- private final Set<DiscoveryNode> nodeToDisconnect = ConcurrentCollections.newConcurrentSet();\n+ private final Map<TransportAddress, Connection> tempConnections = new HashMap<>();\n+ private final KeyedLock<TransportAddress> connectionLock = new KeyedLock<>(true);\n private final PingCollection pingCollection;\n+ private final List<DiscoveryNode> seedNodes;\n+ private final Consumer<PingCollection> pingListener;\n+ private final DiscoveryNode localNode;\n+ private final ConnectionProfile connectionProfile;\n \n private AtomicBoolean closed = new AtomicBoolean(false);\n \n- SendPingsHandler(int id) {\n+ PingingRound(int id, List<DiscoveryNode> seedNodes, Consumer<PingCollection> resultsConsumer, DiscoveryNode localNode,\n+ ConnectionProfile connectionProfile) {\n this.id = id;\n+ this.seedNodes = Collections.unmodifiableList(new ArrayList<>(seedNodes));\n+ this.pingListener = resultsConsumer;\n+ this.localNode = localNode;\n+ this.connectionProfile = connectionProfile;\n this.pingCollection = new PingCollection();\n }\n \n@@ -377,154 +375,170 @@ public boolean isClosed() {\n return this.closed.get();\n }\n \n- public PingCollection pingCollection() {\n- return pingCollection;\n+ public List<DiscoveryNode> getSeedNodes() {\n+ ensureOpen();\n+ return seedNodes;\n+ }\n+\n+ public Connection getOrConnect(DiscoveryNode node) throws IOException {\n+ Connection result;\n+ try (Releasable ignore = connectionLock.acquire(node.getAddress())) {\n+ result = tempConnections.get(node.getAddress());\n+ if (result == null) {\n+ boolean success = false;\n+ result = transportService.openConnection(node, connectionProfile);\n+ try {\n+ transportService.handshake(result, 
connectionProfile.getHandshakeTimeout().millis());\n+ synchronized (this) {\n+ // acquire lock to prevent concurrent closing\n+ Connection existing = tempConnections.put(node.getAddress(), result);\n+ assert existing == null;\n+ success = true;\n+ }\n+ } finally {\n+ if (success == false) {\n+ IOUtils.closeWhileHandlingException(result);\n+ }\n+ }\n+ }\n+ }\n+ return result;\n+ }\n+\n+ private void ensureOpen() {\n+ if (isClosed()) {\n+ throw new AlreadyClosedException(\"pinging round [\" + id + \"] is finished\");\n+ }\n+ }\n+\n+ public void addPingResponseToCollection(PingResponse pingResponse) {\n+ if (localNode.equals(pingResponse.node()) == false) {\n+ pingCollection.addPing(pingResponse);\n+ }\n }\n \n @Override\n public void close() {\n- if (closed.compareAndSet(false, true)) {\n- receivedResponses.remove(id);\n+ List<Connection> toClose = null;\n+ synchronized (this) {\n+ if (closed.compareAndSet(false, true)) {\n+ activePingingRounds.remove(id);\n+ toClose = new ArrayList<>(tempConnections.values());\n+ tempConnections.clear();\n+ }\n }\n+ if (toClose != null) {\n+ // we actually closed\n+ try {\n+ pingListener.accept(pingCollection);\n+ } finally {\n+ IOUtils.closeWhileHandlingException(toClose);\n+ }\n+ }\n+ }\n+\n+ public ConnectionProfile getConnectionProfile() {\n+ return connectionProfile;\n }\n }\n \n \n- void sendPings(\n- final TimeValue timeout,\n- @Nullable TimeValue waitTime,\n- final SendPingsHandler sendPingsHandler,\n- final List<DiscoveryNode> resolvedDiscoveryNodes) {\n+ protected void sendPings(final TimeValue timeout, final PingingRound pingingRound) {\n final UnicastPingRequest pingRequest = new UnicastPingRequest();\n- pingRequest.id = sendPingsHandler.id();\n+ pingRequest.id = pingingRound.id();\n pingRequest.timeout = timeout;\n DiscoveryNodes discoNodes = contextProvider.nodes();\n \n pingRequest.pingResponse = createPingResponse(discoNodes);\n \n- HashSet<DiscoveryNode> nodesToPingSet = new HashSet<>();\n- for (PingResponse temporalResponse : temporalResponses) {\n- // Only send pings to nodes that have the same cluster name.\n- if (clusterName.equals(temporalResponse.clusterName())) {\n- nodesToPingSet.add(temporalResponse.node());\n- }\n- }\n- nodesToPingSet.addAll(hostsProvider.buildDynamicNodes());\n+ Set<DiscoveryNode> nodesFromResponses = temporalResponses.stream().map(pingResponse -> {\n+ assert clusterName.equals(pingResponse.clusterName()) :\n+ \"got a ping request from a different cluster. 
expected \" + clusterName + \" got \" + pingResponse.clusterName();\n+ return pingResponse.node();\n+ }).collect(Collectors.toSet());\n \n- // add all possible master nodes that were active in the last known cluster configuration\n- for (ObjectCursor<DiscoveryNode> masterNode : discoNodes.getMasterNodes().values()) {\n- nodesToPingSet.add(masterNode.value);\n- }\n+ // dedup by address\n+ final Map<TransportAddress, DiscoveryNode> uniqueNodesByAddress =\n+ Stream.concat(pingingRound.getSeedNodes().stream(), nodesFromResponses.stream())\n+ .collect(Collectors.toMap(DiscoveryNode::getAddress, Function.identity(), (n1, n2) -> n1));\n \n- // sort the nodes by likelihood of being an active master\n- List<DiscoveryNode> sortedNodesToPing = ElectMasterService.sortByMasterLikelihood(nodesToPingSet);\n-\n- // add the configured hosts first\n- final List<DiscoveryNode> nodesToPing = new ArrayList<>(resolvedDiscoveryNodes.size() + sortedNodesToPing.size());\n- nodesToPing.addAll(resolvedDiscoveryNodes);\n- nodesToPing.addAll(sortedNodesToPing);\n-\n- final CountDownLatch latch = new CountDownLatch(nodesToPing.size());\n- for (final DiscoveryNode node : nodesToPing) {\n- // make sure we are connected\n- final boolean nodeFoundByAddress;\n- DiscoveryNode nodeToSend = discoNodes.findByAddress(node.getAddress());\n- if (nodeToSend != null) {\n- nodeFoundByAddress = true;\n- } else {\n- nodeToSend = node;\n- nodeFoundByAddress = false;\n- }\n \n- if (!transportService.nodeConnected(nodeToSend)) {\n- if (sendPingsHandler.isClosed()) {\n- return;\n+ // resolve what we can via the latest cluster state\n+ final Set<DiscoveryNode> nodesToPing = uniqueNodesByAddress.values().stream()\n+ .map(node -> {\n+ DiscoveryNode foundNode = discoNodes.findByAddress(node.getAddress());\n+ if (foundNode == null) {\n+ return node;\n+ } else {\n+ return foundNode;\n }\n- // if we find on the disco nodes a matching node by address, we are going to restore the connection\n- // anyhow down the line if its not connected...\n- // if we can't resolve the node, we don't know and we have to clean up after pinging. We do have\n- // to make sure we don't disconnect a true node which was temporarily removed from the DiscoveryNodes\n- // but will be added again during the pinging. 
We therefore create a new temporary node\n- if (!nodeFoundByAddress) {\n- if (!nodeToSend.getId().startsWith(UNICAST_NODE_PREFIX)) {\n- DiscoveryNode tempNode = new DiscoveryNode(\"\",\n- UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + \"_\" + nodeToSend.getId() + \"#\",\n- UUIDs.randomBase64UUID(), nodeToSend.getHostName(), nodeToSend.getHostAddress(), nodeToSend.getAddress(),\n- nodeToSend.getAttributes(), nodeToSend.getRoles(), nodeToSend.getVersion());\n-\n- logger.trace(\"replacing {} with temp node {}\", nodeToSend, tempNode);\n- nodeToSend = tempNode;\n+ }).collect(Collectors.toSet());\n+\n+ nodesToPing.forEach(node -> sendPingRequestToNode(node, timeout, pingingRound, pingRequest));\n+ }\n+\n+ private void sendPingRequestToNode(final DiscoveryNode node, TimeValue timeout, final PingingRound pingingRound,\n+ final UnicastPingRequest pingRequest) {\n+ submitToExecutor(new AbstractRunnable() {\n+ @Override\n+ protected void doRun() throws Exception {\n+ Connection connection = null;\n+ if (transportService.nodeConnected(node)) {\n+ try {\n+ // concurrency can still cause disconnects\n+ connection = transportService.getConnection(node);\n+ } catch (NodeNotConnectedException e) {\n+ logger.trace(\"[{}] node [{}] just disconnected, will create a temp connection\", pingingRound.id(), node);\n }\n- sendPingsHandler.nodeToDisconnect.add(nodeToSend);\n }\n- // fork the connection to another thread\n- final DiscoveryNode finalNodeToSend = nodeToSend;\n- unicastZenPingExecutorService.execute(new Runnable() {\n- @Override\n- public void run() {\n- if (sendPingsHandler.isClosed()) {\n- return;\n- }\n- boolean success = false;\n- try {\n- // connect to the node, see if we manage to do it, if not, bail\n- if (!nodeFoundByAddress) {\n- logger.trace(\"[{}] connecting (light) to {}\", sendPingsHandler.id(), finalNodeToSend);\n- transportService.connectToNodeAndHandshake(finalNodeToSend, timeout.getMillis());\n- } else {\n- logger.trace(\"[{}] connecting to {}\", sendPingsHandler.id(), finalNodeToSend);\n- transportService.connectToNode(finalNodeToSend);\n- }\n- logger.trace(\"[{}] connected to {}\", sendPingsHandler.id(), node);\n- if (receivedResponses.containsKey(sendPingsHandler.id())) {\n- // we are connected and still in progress, send the ping request\n- sendPingRequestToNode(sendPingsHandler.id(), timeout, pingRequest, latch, node, finalNodeToSend);\n- } else {\n- // connect took too long, just log it and bail\n- latch.countDown();\n- logger.trace(\"[{}] connect to {} was too long outside of ping window, bailing\",\n- sendPingsHandler.id(), node);\n- }\n- success = true;\n- } catch (ConnectTransportException e) {\n- // can't connect to the node - this is a more common path!\n- logger.trace(\n- (Supplier<?>) () -> new ParameterizedMessage(\n- \"[{}] failed to connect to {}\", sendPingsHandler.id(), finalNodeToSend), e);\n- } catch (RemoteTransportException e) {\n- // something went wrong on the other side\n- logger.debug(\n- (Supplier<?>) () -> new ParameterizedMessage(\n- \"[{}] received a remote error as a response to ping {}\", sendPingsHandler.id(), finalNodeToSend), e);\n- } catch (Exception e) {\n- logger.warn(\n- (Supplier<?>) () -> new ParameterizedMessage(\n- \"[{}] failed send ping to {}\", sendPingsHandler.id(), finalNodeToSend), e);\n- } finally {\n- if (!success) {\n- latch.countDown();\n- }\n- }\n- }\n- });\n- } else {\n- sendPingRequestToNode(sendPingsHandler.id(), timeout, pingRequest, latch, node, nodeToSend);\n+\n+ if (connection == null) {\n+ connection = 
pingingRound.getOrConnect(node);\n+ }\n+\n+ logger.trace(\"[{}] sending to {}\", pingingRound.id(), node);\n+ transportService.sendRequest(connection, ACTION_NAME, pingRequest,\n+ TransportRequestOptions.builder().withTimeout((long) (timeout.millis() * 1.25)).build(),\n+ getPingResponseHandler(pingingRound, node));\n }\n- }\n- if (waitTime != null) {\n- try {\n- latch.await(waitTime.millis(), TimeUnit.MILLISECONDS);\n- } catch (InterruptedException e) {\n- // ignore\n+\n+ @Override\n+ public void onFailure(Exception e) {\n+ if (e instanceof ConnectTransportException || e instanceof AlreadyClosedException) {\n+ // can't connect to the node - this is more common path!\n+ logger.trace(\n+ (Supplier<?>) () -> new ParameterizedMessage(\n+ \"[{}] failed to ping {}\", pingingRound.id(), node), e);\n+ } else if (e instanceof RemoteTransportException) {\n+ // something went wrong on the other side\n+ logger.debug(\n+ (Supplier<?>) () -> new ParameterizedMessage(\n+ \"[{}] received a remote error as a response to ping {}\", pingingRound.id(), node), e);\n+ } else {\n+ logger.warn(\n+ (Supplier<?>) () -> new ParameterizedMessage(\n+ \"[{}] failed send ping to {}\", pingingRound.id(), node), e);\n+ }\n }\n- }\n+\n+ @Override\n+ public void onRejection(Exception e) {\n+ // The RejectedExecutionException can come from the fact unicastZenPingExecutorService is at its max down in sendPings\n+ // But don't bail here, we can retry later on after the send ping has been scheduled.\n+ logger.debug(\"Ping execution rejected\", e);\n+ }\n+ });\n }\n \n- private void sendPingRequestToNode(final int id, final TimeValue timeout, final UnicastPingRequest pingRequest,\n- final CountDownLatch latch, final DiscoveryNode node, final DiscoveryNode nodeToSend) {\n- logger.trace(\"[{}] sending to {}\", id, nodeToSend);\n- transportService.sendRequest(nodeToSend, ACTION_NAME, pingRequest, TransportRequestOptions.builder()\n- .withTimeout((long) (timeout.millis() * 1.25)).build(), new TransportResponseHandler<UnicastPingResponse>() {\n+ // for testing\n+ protected void submitToExecutor(AbstractRunnable abstractRunnable) {\n+ unicastZenPingExecutorService.execute(abstractRunnable);\n+ }\n+\n+ // for testing\n+ protected TransportResponseHandler<UnicastPingResponse> getPingResponseHandler(final PingingRound pingingRound,\n+ final DiscoveryNode node) {\n+ return new TransportResponseHandler<UnicastPingResponse>() {\n \n @Override\n public UnicastPingResponse newInstance() {\n@@ -538,50 +552,36 @@ public String executor() {\n \n @Override\n public void handleResponse(UnicastPingResponse response) {\n- logger.trace(\"[{}] received response from {}: {}\", id, nodeToSend, Arrays.toString(response.pingResponses));\n- try {\n- DiscoveryNodes discoveryNodes = contextProvider.nodes();\n- for (PingResponse pingResponse : response.pingResponses) {\n- if (pingResponse.node().equals(discoveryNodes.getLocalNode())) {\n- // that's us, ignore\n- continue;\n- }\n- SendPingsHandler sendPingsHandler = receivedResponses.get(response.id);\n- if (sendPingsHandler == null) {\n- if (!closed) {\n- // Only log when we're not closing the node. 
Having no send ping handler is then expected\n- logger.warn(\"received ping response {} with no matching handler id [{}]\", pingResponse, response.id);\n- }\n- } else {\n- sendPingsHandler.pingCollection().addPing(pingResponse);\n- }\n+ logger.trace(\"[{}] received response from {}: {}\", pingingRound.id(), node, Arrays.toString(response.pingResponses));\n+ if (pingingRound.isClosed()) {\n+ if (logger.isTraceEnabled()) {\n+ logger.trace(\"[{}] skipping received response from {}. already closed\", pingingRound.id(), node);\n }\n- } finally {\n- latch.countDown();\n+ } else {\n+ Stream.of(response.pingResponses).forEach(pingingRound::addPingResponseToCollection);\n }\n }\n \n @Override\n public void handleException(TransportException exp) {\n- latch.countDown();\n- if (exp instanceof ConnectTransportException) {\n+ if (exp instanceof ConnectTransportException || exp.getCause() instanceof ConnectTransportException) {\n // ok, not connected...\n- logger.trace((Supplier<?>) () -> new ParameterizedMessage(\"failed to connect to {}\", nodeToSend), exp);\n- } else {\n+ logger.trace((Supplier<?>) () -> new ParameterizedMessage(\"failed to connect to {}\", node), exp);\n+ } else if (closed == false) {\n logger.warn((Supplier<?>) () -> new ParameterizedMessage(\"failed to send ping to [{}]\", node), exp);\n }\n }\n- });\n+ };\n }\n \n private UnicastPingResponse handlePingRequest(final UnicastPingRequest request) {\n+ assert clusterName.equals(request.pingResponse.clusterName()) :\n+ \"got a ping request from a different cluster. expected \" + clusterName + \" got \" + request.pingResponse.clusterName();\n temporalResponses.add(request.pingResponse);\n- threadPool.schedule(TimeValue.timeValueMillis(request.timeout.millis() * 2), ThreadPool.Names.SAME, new Runnable() {\n- @Override\n- public void run() {\n- temporalResponses.remove(request.pingResponse);\n- }\n- });\n+ // add to any ongoing pinging\n+ activePingingRounds.values().forEach(p -> p.addPingResponseToCollection(request.pingResponse));\n+ threadPool.schedule(TimeValue.timeValueMillis(request.timeout.millis() * 2), ThreadPool.Names.SAME,\n+ () -> temporalResponses.remove(request.pingResponse));\n \n List<PingResponse> pingResponses = CollectionUtils.iterableAsArrayList(temporalResponses);\n pingResponses.add(createPingResponse(contextProvider.nodes()));\n@@ -601,11 +601,11 @@ public void messageReceived(UnicastPingRequest request, TransportChannel channel\n channel.sendResponse(handlePingRequest(request));\n } else {\n throw new IllegalStateException(\n- String.format(\n- Locale.ROOT,\n- \"mismatched cluster names; request: [%s], local: [%s]\",\n- request.pingResponse.clusterName().value(),\n- clusterName.value()));\n+ String.format(\n+ Locale.ROOT,\n+ \"mismatched cluster names; request: [%s], local: [%s]\",\n+ request.pingResponse.clusterName().value(),\n+ clusterName.value()));\n }\n }\n ", "filename": "core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java", "status": "modified" }, { "diff": "@@ -67,11 +67,11 @@\n import java.util.ArrayList;\n import java.util.List;\n import java.util.Set;\n-import java.util.concurrent.CountDownLatch;\n+import java.util.concurrent.CompletableFuture;\n+import java.util.concurrent.ExecutionException;\n import java.util.concurrent.atomic.AtomicBoolean;\n import java.util.concurrent.atomic.AtomicInteger;\n import java.util.concurrent.atomic.AtomicReference;\n-import java.util.function.BiFunction;\n import java.util.function.Consumer;\n import java.util.stream.Collectors;\n \n@@ -1021,24 
+1021,22 @@ public void handleException(TransportException exp) {\n }\n \n private ZenPing.PingCollection pingAndWait(TimeValue timeout) {\n- final ZenPing.PingCollection response = new ZenPing.PingCollection();\n- final CountDownLatch latch = new CountDownLatch(1);\n+ final CompletableFuture<ZenPing.PingCollection> response = new CompletableFuture<>();\n try {\n- zenPing.ping(pings -> {\n- response.addPings(pings);\n- latch.countDown();\n- }, timeout);\n+ zenPing.ping(response::complete, timeout);\n } catch (Exception ex) {\n- logger.warn(\"Ping execution failed\", ex);\n- latch.countDown();\n+ // logged later\n+ response.completeExceptionally(ex);\n }\n \n try {\n- latch.await();\n- return response;\n+ return response.get();\n } catch (InterruptedException e) {\n logger.trace(\"pingAndWait interrupted\");\n- return response;\n+ return new ZenPing.PingCollection();\n+ } catch (ExecutionException e) {\n+ logger.warn(\"Ping execution failed\", e);\n+ return new ZenPing.PingCollection();\n }\n }\n ", "filename": "core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java", "status": "modified" }, { "diff": "@@ -30,29 +30,19 @@\n \n import java.io.IOException;\n import java.util.ArrayList;\n-import java.util.Collection;\n import java.util.HashMap;\n import java.util.List;\n import java.util.Map;\n import java.util.concurrent.atomic.AtomicLong;\n+import java.util.function.Consumer;\n \n import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK;\n \n public interface ZenPing extends Releasable {\n \n void start(PingContextProvider contextProvider);\n \n- void ping(PingListener listener, TimeValue timeout);\n-\n- interface PingListener {\n-\n- /**\n- * called when pinging is done.\n- *\n- * @param pings ping result *must\n- */\n- void onPing(Collection<PingResponse> pings);\n- }\n+ void ping(Consumer<PingCollection> resultsConsumer, TimeValue timeout);\n \n class PingResponse implements Streamable {\n \n@@ -191,13 +181,6 @@ public synchronized boolean addPing(PingResponse ping) {\n return false;\n }\n \n- /** adds multiple pings if newer than previous pings from the same node */\n- public synchronized void addPings(Iterable<PingResponse> pings) {\n- for (PingResponse ping : pings) {\n- addPing(ping);\n- }\n- }\n-\n /** serialize current pings to a list. It is guaranteed that the list contains one ping response per node */\n public synchronized List<PingResponse> toList() {\n return new ArrayList<>(pings.values());", "filename": "core/src/main/java/org/elasticsearch/discovery/zen/ZenPing.java", "status": "modified" }, { "diff": "@@ -18,6 +18,7 @@\n */\n package org.elasticsearch.transport;\n \n+import org.elasticsearch.common.inject.internal.Nullable;\n import org.elasticsearch.common.unit.TimeValue;\n \n import java.util.ArrayList;\n@@ -35,16 +36,25 @@\n public final class ConnectionProfile {\n \n /**\n- * A pre-built light connection profile that shares a single connection across all\n- * types.\n+ * Builds a connection profile that is dedicated to a single channel type. 
Use this\n+ * when opening single use connections\n */\n- public static final ConnectionProfile LIGHT_PROFILE = new ConnectionProfile(\n- Collections.singletonList(new ConnectionTypeHandle(0, 1, EnumSet.of(\n- TransportRequestOptions.Type.BULK,\n- TransportRequestOptions.Type.PING,\n- TransportRequestOptions.Type.RECOVERY,\n- TransportRequestOptions.Type.REG,\n- TransportRequestOptions.Type.STATE))), 1, null, null);\n+ public static ConnectionProfile buildSingleChannelProfile(TransportRequestOptions.Type channelType,\n+ @Nullable TimeValue connectTimeout,\n+ @Nullable TimeValue handshakeTimeout) {\n+ Builder builder = new Builder();\n+ builder.addConnections(1, channelType);\n+ final EnumSet<TransportRequestOptions.Type> otherTypes = EnumSet.allOf(TransportRequestOptions.Type.class);\n+ otherTypes.remove(channelType);\n+ builder.addConnections(0, otherTypes.stream().toArray(TransportRequestOptions.Type[]::new));\n+ if (connectTimeout != null) {\n+ builder.setConnectTimeout(connectTimeout);\n+ }\n+ if (handshakeTimeout != null) {\n+ builder.setHandshakeTimeout(handshakeTimeout);\n+ }\n+ return builder.build();\n+ }\n \n private final List<ConnectionTypeHandle> handles;\n private final int numConnections;", "filename": "core/src/main/java/org/elasticsearch/transport/ConnectionProfile.java", "status": "modified" }, { "diff": "@@ -63,8 +63,7 @@ public interface Transport extends LifecycleComponent {\n boolean nodeConnected(DiscoveryNode node);\n \n /**\n- * Connects to a node with the given connection profile. Use {@link ConnectionProfile#LIGHT_PROFILE} when just connecting for ping\n- * and then disconnecting. If the node is already connected this method has no effect\n+ * Connects to a node with the given connection profile. If the node is already connected this method has no effect\n */\n void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile) throws ConnectTransportException;\n ", "filename": "core/src/main/java/org/elasticsearch/transport/Transport.java", "status": "modified" }, { "diff": "@@ -62,7 +62,6 @@\n import java.util.concurrent.CopyOnWriteArrayList;\n import java.util.concurrent.CountDownLatch;\n import java.util.concurrent.ScheduledFuture;\n-import java.util.concurrent.atomic.AtomicLong;\n import java.util.function.Function;\n import java.util.function.Supplier;\n \n@@ -328,32 +327,6 @@ public Transport.Connection openConnection(final DiscoveryNode node, ConnectionP\n }\n }\n \n- /**\n- * Lightly connect to the specified node, returning updated node\n- * information. 
The handshake will fail if the cluster name on the\n- * target node mismatches the local cluster name and\n- * {@code checkClusterName} is {@code true}.\n- *\n- * @param node the node to connect to\n- * @param handshakeTimeout handshake timeout\n- * @return the connected node\n- * @throws ConnectTransportException if the connection failed\n- * @throws IllegalStateException if the handshake failed\n- */\n- public DiscoveryNode connectToNodeAndHandshake(\n- final DiscoveryNode node,\n- final long handshakeTimeout) throws IOException {\n- if (node.equals(localNode)) {\n- return localNode;\n- }\n- DiscoveryNode handshakeNode;\n- try (Transport.Connection connection = transport.openConnection(node, ConnectionProfile.LIGHT_PROFILE)) {\n- handshakeNode = handshake(connection, handshakeTimeout);\n- }\n- connectToNode(node, ConnectionProfile.LIGHT_PROFILE);\n- return handshakeNode;\n- }\n-\n /**\n * Executes a high-level handshake using the given connection\n * and returns the discovery node of the node the connection", "filename": "core/src/main/java/org/elasticsearch/transport/TransportService.java", "status": "modified" }, { "diff": "@@ -26,6 +26,7 @@\n import org.elasticsearch.cluster.ClusterState;\n import org.elasticsearch.cluster.block.ClusterBlocks;\n import org.elasticsearch.cluster.node.DiscoveryNode;\n+import org.elasticsearch.cluster.node.DiscoveryNode.Role;\n import org.elasticsearch.cluster.node.DiscoveryNodes;\n import org.elasticsearch.common.io.stream.NamedWriteableRegistry;\n import org.elasticsearch.common.network.NetworkAddress;\n@@ -34,18 +35,22 @@\n import org.elasticsearch.common.transport.TransportAddress;\n import org.elasticsearch.common.unit.TimeValue;\n import org.elasticsearch.common.util.BigArrays;\n+import org.elasticsearch.common.util.concurrent.AbstractRunnable;\n import org.elasticsearch.common.util.concurrent.ConcurrentCollections;\n import org.elasticsearch.common.util.concurrent.EsExecutors;\n import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.test.VersionUtils;\n-import org.elasticsearch.test.junit.annotations.TestLogging;\n import org.elasticsearch.test.transport.MockTransportService;\n import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n+import org.elasticsearch.transport.ConnectionProfile;\n import org.elasticsearch.transport.MockTcpTransport;\n import org.elasticsearch.transport.Transport;\n import org.elasticsearch.transport.TransportConnectionListener;\n+import org.elasticsearch.transport.TransportException;\n+import org.elasticsearch.transport.TransportRequestOptions;\n+import org.elasticsearch.transport.TransportResponseHandler;\n import org.elasticsearch.transport.TransportService;\n import org.elasticsearch.transport.TransportSettings;\n import org.junit.After;\n@@ -60,12 +65,14 @@\n import java.util.Arrays;\n import java.util.Collection;\n import java.util.Collections;\n+import java.util.EnumSet;\n import java.util.HashMap;\n import java.util.HashSet;\n import java.util.List;\n import java.util.Map;\n import java.util.Set;\n import java.util.Stack;\n+import java.util.concurrent.CompletableFuture;\n import java.util.concurrent.ConcurrentMap;\n import java.util.concurrent.CountDownLatch;\n import java.util.concurrent.ExecutionException;\n@@ -82,7 +89,6 @@\n import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK;\n import static org.hamcrest.Matchers.empty;\n import static 
org.hamcrest.Matchers.equalTo;\n-import static org.hamcrest.Matchers.greaterThan;\n import static org.hamcrest.Matchers.hasSize;\n import static org.mockito.Matchers.eq;\n import static org.mockito.Mockito.mock;\n@@ -124,8 +130,7 @@ public void tearDown() throws Exception {\n \n private static final UnicastHostsProvider EMPTY_HOSTS_PROVIDER = Collections::emptyList;\n \n- @TestLogging(\"org.elasticsearch.transport:TRACE,org.elasticsearch.discovery.zen.UnicastZenPing:TRACE\")\n- public void testSimplePings() throws IOException, InterruptedException {\n+ public void testSimplePings() throws IOException, InterruptedException, ExecutionException {\n // use ephemeral ports\n final Settings settings = Settings.builder().put(\"cluster.name\", \"test\").put(TransportSettings.PORT.getKey(), 0).build();\n final Settings settingsMismatch =\n@@ -140,33 +145,43 @@ public void testSimplePings() throws IOException, InterruptedException {\n new NoneCircuitBreakerService(),\n new NamedWriteableRegistry(Collections.emptyList()),\n networkService,\n- v);\n+ v) {\n+ @Override\n+ public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile) {\n+ throw new AssertionError(\"zen pings should never connect to node (got [\" + node + \"])\");\n+ }\n+ };\n \n NetworkHandle handleA = startServices(settings, threadPool, \"UZP_A\", Version.CURRENT, supplier);\n closeables.push(handleA.transportService);\n NetworkHandle handleB = startServices(settings, threadPool, \"UZP_B\", Version.CURRENT, supplier);\n closeables.push(handleB.transportService);\n NetworkHandle handleC = startServices(settingsMismatch, threadPool, \"UZP_C\", Version.CURRENT, supplier);\n closeables.push(handleC.transportService);\n- // just fake that no versions are compatible with this node\n- Version previousVersion = VersionUtils.getPreviousVersion(Version.CURRENT.minimumCompatibilityVersion());\n- Version versionD = VersionUtils.randomVersionBetween(random(), previousVersion.minimumCompatibilityVersion(), previousVersion);\n+ final Version versionD;\n+ if (randomBoolean()) {\n+ versionD = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT);\n+ } else {\n+ versionD = Version.CURRENT;\n+ }\n+ logger.info(\"UZP_D version set to [{}]\", versionD);\n NetworkHandle handleD = startServices(settingsMismatch, threadPool, \"UZP_D\", versionD, supplier);\n closeables.push(handleD.transportService);\n \n final ClusterState state = ClusterState.builder(new ClusterName(\"test\")).version(randomPositiveLong()).build();\n+ final ClusterState stateMismatch = ClusterState.builder(new ClusterName(\"mismatch\")).version(randomPositiveLong()).build();\n \n Settings hostsSettings = Settings.builder()\n- .putArray(\"discovery.zen.ping.unicast.hosts\",\n+ .putArray(\"discovery.zen.ping.unicast.hosts\",\n NetworkAddress.format(new InetSocketAddress(handleA.address.address().getAddress(), handleA.address.address().getPort())),\n NetworkAddress.format(new InetSocketAddress(handleB.address.address().getAddress(), handleB.address.address().getPort())),\n NetworkAddress.format(new InetSocketAddress(handleC.address.address().getAddress(), handleC.address.address().getPort())),\n NetworkAddress.format(new InetSocketAddress(handleD.address.address().getAddress(), handleD.address.address().getPort())))\n- .put(\"cluster.name\", \"test\")\n- .build();\n+ .put(\"cluster.name\", \"test\")\n+ .build();\n \n Settings hostsSettingsMismatch = Settings.builder().put(hostsSettings).put(settingsMismatch).build();\n- 
UnicastZenPing zenPingA = new UnicastZenPing(hostsSettings, threadPool, handleA.transportService, EMPTY_HOSTS_PROVIDER);\n+ TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER);\n zenPingA.start(new PingContextProvider() {\n @Override\n public DiscoveryNodes nodes() {\n@@ -180,7 +195,7 @@ public ClusterState clusterState() {\n });\n closeables.push(zenPingA);\n \n- UnicastZenPing zenPingB = new UnicastZenPing(hostsSettings, threadPool, handleB.transportService, EMPTY_HOSTS_PROVIDER);\n+ TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER);\n zenPingB.start(new PingContextProvider() {\n @Override\n public DiscoveryNodes nodes() {\n@@ -194,7 +209,8 @@ public ClusterState clusterState() {\n });\n closeables.push(zenPingB);\n \n- UnicastZenPing zenPingC = new UnicastZenPing(hostsSettingsMismatch, threadPool, handleC.transportService, EMPTY_HOSTS_PROVIDER) {\n+ TestUnicastZenPing zenPingC = new TestUnicastZenPing(hostsSettingsMismatch, threadPool, handleC,\n+ EMPTY_HOSTS_PROVIDER) {\n @Override\n protected Version getVersion() {\n return versionD;\n@@ -208,12 +224,13 @@ public DiscoveryNodes nodes() {\n \n @Override\n public ClusterState clusterState() {\n- return state;\n+ return stateMismatch;\n }\n });\n closeables.push(zenPingC);\n \n- UnicastZenPing zenPingD = new UnicastZenPing(hostsSettingsMismatch, threadPool, handleD.transportService, EMPTY_HOSTS_PROVIDER);\n+ TestUnicastZenPing zenPingD = new TestUnicastZenPing(hostsSettingsMismatch, threadPool, handleD,\n+ EMPTY_HOSTS_PROVIDER);\n zenPingD.start(new PingContextProvider() {\n @Override\n public DiscoveryNodes nodes() {\n@@ -222,40 +239,48 @@ public DiscoveryNodes nodes() {\n \n @Override\n public ClusterState clusterState() {\n- return state;\n+ return stateMismatch;\n }\n });\n closeables.push(zenPingD);\n \n logger.info(\"ping from UZP_A\");\n- Collection<ZenPing.PingResponse> pingResponses = zenPingA.pingAndWait(TimeValue.timeValueMillis(500));\n+ Collection<ZenPing.PingResponse> pingResponses = zenPingA.pingAndWait().toList();\n assertThat(pingResponses.size(), equalTo(1));\n ZenPing.PingResponse ping = pingResponses.iterator().next();\n assertThat(ping.node().getId(), equalTo(\"UZP_B\"));\n assertThat(ping.getClusterStateVersion(), equalTo(state.version()));\n- assertCountersMoreThan(handleA, handleB, handleC, handleD);\n+ assertPingCount(handleA, handleB, 3);\n+ assertPingCount(handleA, handleC, 0); // mismatch, shouldn't ping\n+ assertPingCount(handleA, handleD, 0); // mismatch, shouldn't ping\n \n // ping again, this time from B,\n logger.info(\"ping from UZP_B\");\n- pingResponses = zenPingB.pingAndWait(TimeValue.timeValueMillis(500));\n+ pingResponses = zenPingB.pingAndWait().toList();\n assertThat(pingResponses.size(), equalTo(1));\n ping = pingResponses.iterator().next();\n assertThat(ping.node().getId(), equalTo(\"UZP_A\"));\n assertThat(ping.getClusterStateVersion(), equalTo(ElectMasterService.MasterCandidate.UNRECOVERED_CLUSTER_VERSION));\n- assertCountersMoreThan(handleB, handleA, handleC, handleD);\n+ assertPingCount(handleB, handleA, 3);\n+ assertPingCount(handleB, handleC, 0); // mismatch, shouldn't ping\n+ assertPingCount(handleB, handleD, 0); // mismatch, shouldn't ping\n \n logger.info(\"ping from UZP_C\");\n- pingResponses = zenPingC.pingAndWait(TimeValue.timeValueMillis(500));\n- assertThat(pingResponses.size(), equalTo(0));\n- assertCountersMoreThan(handleC, handleA, handleB, handleD);\n+ 
pingResponses = zenPingC.pingAndWait().toList();\n+ assertThat(pingResponses.size(), equalTo(1));\n+ assertPingCount(handleC, handleA, 0);\n+ assertPingCount(handleC, handleB, 0);\n+ assertPingCount(handleC, handleD, 3);\n \n logger.info(\"ping from UZP_D\");\n- pingResponses = zenPingD.pingAndWait(TimeValue.timeValueMillis(500));\n- assertThat(pingResponses.size(), equalTo(0));\n- assertCountersMoreThan(handleD, handleA, handleB, handleC);\n+ pingResponses = zenPingD.pingAndWait().toList();\n+ assertThat(pingResponses.size(), equalTo(1));\n+ assertPingCount(handleD, handleA, 0);\n+ assertPingCount(handleD, handleB, 0);\n+ assertPingCount(handleD, handleC, 3);\n }\n \n- public void testUnknownHostNotCached() {\n+ public void testUnknownHostNotCached() throws ExecutionException, InterruptedException {\n // use ephemeral ports\n final Settings settings = Settings.builder().put(\"cluster.name\", \"test\").put(TransportSettings.PORT.getKey(), 0).build();\n \n@@ -306,7 +331,7 @@ public TransportAddress[] addressesFromString(String address, int perAddressLimi\n \n final ClusterState state = ClusterState.builder(new ClusterName(\"test\")).version(randomPositiveLong()).build();\n \n- final UnicastZenPing zenPingA = new UnicastZenPing(hostsSettings, threadPool, handleA.transportService, EMPTY_HOSTS_PROVIDER);\n+ final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER);\n zenPingA.start(new PingContextProvider() {\n @Override\n public DiscoveryNodes nodes() {\n@@ -320,7 +345,7 @@ public ClusterState clusterState() {\n });\n closeables.push(zenPingA);\n \n- UnicastZenPing zenPingB = new UnicastZenPing(hostsSettings, threadPool, handleB.transportService, EMPTY_HOSTS_PROVIDER);\n+ TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER);\n zenPingB.start(new PingContextProvider() {\n @Override\n public DiscoveryNodes nodes() {\n@@ -334,7 +359,7 @@ public ClusterState clusterState() {\n });\n closeables.push(zenPingB);\n \n- UnicastZenPing zenPingC = new UnicastZenPing(hostsSettings, threadPool, handleC.transportService, EMPTY_HOSTS_PROVIDER);\n+ TestUnicastZenPing zenPingC = new TestUnicastZenPing(hostsSettings, threadPool, handleC, EMPTY_HOSTS_PROVIDER);\n zenPingC.start(new PingContextProvider() {\n @Override\n public DiscoveryNodes nodes() {\n@@ -350,12 +375,13 @@ public ClusterState clusterState() {\n \n // the presence of an unresolvable host should not prevent resolvable hosts from being pinged\n {\n- final Collection<ZenPing.PingResponse> pingResponses = zenPingA.pingAndWait(TimeValue.timeValueMillis(500));\n+ final Collection<ZenPing.PingResponse> pingResponses = zenPingA.pingAndWait().toList();\n assertThat(pingResponses.size(), equalTo(1));\n ZenPing.PingResponse ping = pingResponses.iterator().next();\n assertThat(ping.node().getId(), equalTo(\"UZP_C\"));\n assertThat(ping.getClusterStateVersion(), equalTo(state.version()));\n- assertCountersMoreThan(handleA, handleC);\n+ assertPingCount(handleA, handleB, 0);\n+ assertPingCount(handleA, handleC, 3);\n assertNull(handleA.counters.get(handleB.address));\n }\n \n@@ -373,11 +399,13 @@ public ClusterState clusterState() {\n \n // now we should see pings to UZP_B; this establishes that host resolutions are not cached\n {\n- final Collection<ZenPing.PingResponse> secondPingResponses = zenPingA.pingAndWait(TimeValue.timeValueMillis(500));\n+ handleA.counters.clear();\n+ final Collection<ZenPing.PingResponse> secondPingResponses = 
zenPingA.pingAndWait().toList();\n assertThat(secondPingResponses.size(), equalTo(2));\n final Set<String> ids = new HashSet<>(secondPingResponses.stream().map(p -> p.node().getId()).collect(Collectors.toList()));\n assertThat(ids, equalTo(new HashSet<>(Arrays.asList(\"UZP_B\", \"UZP_C\"))));\n- assertCountersMoreThan(moreThan, handleA, handleB, handleC);\n+ assertPingCount(handleA, handleB, 3);\n+ assertPingCount(handleA, handleC, 3);\n }\n }\n \n@@ -395,15 +423,14 @@ public void testPortLimit() throws InterruptedException {\n final TransportService transportService =\n new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);\n closeables.push(transportService);\n- final AtomicInteger idGenerator = new AtomicInteger();\n final int limitPortCounts = randomIntBetween(1, 10);\n- final List<DiscoveryNode> discoveryNodes = UnicastZenPing.resolveDiscoveryNodes(\n+ final List<DiscoveryNode> discoveryNodes = TestUnicastZenPing.resolveHostsLists(\n executorService,\n logger,\n Collections.singletonList(\"127.0.0.1\"),\n limitPortCounts,\n transportService,\n- () -> Integer.toString(idGenerator.incrementAndGet()),\n+ \"test_\",\n TimeValue.timeValueSeconds(1));\n assertThat(discoveryNodes, hasSize(limitPortCounts));\n final Set<Integer> ports = new HashSet<>();\n@@ -439,15 +466,14 @@ public TransportAddress[] addressesFromString(String address, int perAddressLimi\n final TransportService transportService =\n new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);\n closeables.push(transportService);\n- final AtomicInteger idGenerator = new AtomicInteger();\n \n- final List<DiscoveryNode> discoveryNodes = UnicastZenPing.resolveDiscoveryNodes(\n+ final List<DiscoveryNode> discoveryNodes = TestUnicastZenPing.resolveHostsLists(\n executorService,\n logger,\n Arrays.asList(hostname),\n 1,\n transportService,\n- () -> Integer.toString(idGenerator.incrementAndGet()),\n+ \"test_\",\n TimeValue.timeValueSeconds(1)\n );\n \n@@ -490,16 +516,15 @@ public TransportAddress[] addressesFromString(String address, int perAddressLimi\n final TransportService transportService =\n new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);\n closeables.push(transportService);\n- final AtomicInteger idGenerator = new AtomicInteger();\n final TimeValue resolveTimeout = TimeValue.timeValueSeconds(randomIntBetween(1, 3));\n try {\n- final List<DiscoveryNode> discoveryNodes = UnicastZenPing.resolveDiscoveryNodes(\n+ final List<DiscoveryNode> discoveryNodes = TestUnicastZenPing.resolveHostsLists(\n executorService,\n logger,\n Arrays.asList(\"hostname1\", \"hostname2\"),\n 1,\n transportService,\n- () -> Integer.toString(idGenerator.incrementAndGet()),\n+ \"test+\",\n resolveTimeout);\n \n assertThat(discoveryNodes, hasSize(1));\n@@ -513,6 +538,156 @@ public TransportAddress[] addressesFromString(String address, int perAddressLimi\n }\n }\n \n+ public void testResolveReuseExistingNodeConnections() throws ExecutionException, InterruptedException {\n+ final Settings settings = Settings.builder().put(\"cluster.name\", \"test\").put(TransportSettings.PORT.getKey(), 0).build();\n+\n+ NetworkService networkService = new NetworkService(settings, Collections.emptyList());\n+\n+ final BiFunction<Settings, Version, Transport> supplier = (s, v) -> new MockTcpTransport(\n+ s,\n+ threadPool,\n+ BigArrays.NON_RECYCLING_INSTANCE,\n+ new NoneCircuitBreakerService(),\n+ new 
NamedWriteableRegistry(Collections.emptyList()),\n+ networkService,\n+ v);\n+\n+ NetworkHandle handleA = startServices(settings, threadPool, \"UZP_A\", Version.CURRENT, supplier, EnumSet.allOf(Role.class));\n+ closeables.push(handleA.transportService);\n+ NetworkHandle handleB = startServices(settings, threadPool, \"UZP_B\", Version.CURRENT, supplier, EnumSet.allOf(Role.class));\n+ closeables.push(handleB.transportService);\n+\n+ final boolean useHosts = randomBoolean();\n+ final Settings.Builder hostsSettingsBuilder = Settings.builder().put(\"cluster.name\", \"test\");\n+ if (useHosts) {\n+ hostsSettingsBuilder.putArray(\"discovery.zen.ping.unicast.hosts\",\n+ NetworkAddress.format(new InetSocketAddress(handleB.address.address().getAddress(), handleB.address.address().getPort()))\n+ );\n+ } else {\n+ hostsSettingsBuilder.put(\"discovery.zen.ping.unicast.hosts\", (String) null);\n+ }\n+ final Settings hostsSettings = hostsSettingsBuilder.build();\n+ final ClusterState state = ClusterState.builder(new ClusterName(\"test\")).version(randomPositiveLong()).build();\n+\n+ // connection to reuse\n+ handleA.transportService.connectToNode(handleB.node);\n+\n+ // install a listener to check that no new connections are made\n+ handleA.transportService.addConnectionListener(new TransportConnectionListener() {\n+ @Override\n+ public void onConnectionOpened(DiscoveryNode node) {\n+ fail(\"should not open any connections. got [\" + node + \"]\");\n+ }\n+ });\n+\n+ final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER);\n+ zenPingA.start(new PingContextProvider() {\n+ @Override\n+ public DiscoveryNodes nodes() {\n+ return DiscoveryNodes.builder().add(handleA.node).add(handleB.node).localNodeId(\"UZP_A\").build();\n+ }\n+\n+ @Override\n+ public ClusterState clusterState() {\n+ return ClusterState.builder(state).blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)).build();\n+ }\n+ });\n+ closeables.push(zenPingA);\n+\n+ TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER);\n+ zenPingB.start(new PingContextProvider() {\n+ @Override\n+ public DiscoveryNodes nodes() {\n+ return DiscoveryNodes.builder().add(handleB.node).localNodeId(\"UZP_B\").build();\n+ }\n+\n+ @Override\n+ public ClusterState clusterState() {\n+ return state;\n+ }\n+ });\n+ closeables.push(zenPingB);\n+\n+ Collection<ZenPing.PingResponse> pingResponses = zenPingA.pingAndWait().toList();\n+ assertThat(pingResponses.size(), equalTo(1));\n+ ZenPing.PingResponse ping = pingResponses.iterator().next();\n+ assertThat(ping.node().getId(), equalTo(\"UZP_B\"));\n+ assertThat(ping.getClusterStateVersion(), equalTo(state.version()));\n+\n+ }\n+\n+ public void testPingingTemporalPings() throws ExecutionException, InterruptedException {\n+ final Settings settings = Settings.builder().put(\"cluster.name\", \"test\").put(TransportSettings.PORT.getKey(), 0).build();\n+\n+ NetworkService networkService = new NetworkService(settings, Collections.emptyList());\n+\n+ final BiFunction<Settings, Version, Transport> supplier = (s, v) -> new MockTcpTransport(\n+ s,\n+ threadPool,\n+ BigArrays.NON_RECYCLING_INSTANCE,\n+ new NoneCircuitBreakerService(),\n+ new NamedWriteableRegistry(Collections.emptyList()),\n+ networkService,\n+ v);\n+\n+ NetworkHandle handleA = startServices(settings, threadPool, \"UZP_A\", Version.CURRENT, supplier, EnumSet.allOf(Role.class));\n+ closeables.push(handleA.transportService);\n+ NetworkHandle 
handleB = startServices(settings, threadPool, \"UZP_B\", Version.CURRENT, supplier, EnumSet.allOf(Role.class));\n+ closeables.push(handleB.transportService);\n+\n+ final Settings hostsSettings = Settings.builder()\n+ .put(\"cluster.name\", \"test\")\n+ .put(\"discovery.zen.ping.unicast.hosts\", (String) null) // use nodes for simplicity\n+ .build();\n+ final ClusterState state = ClusterState.builder(new ClusterName(\"test\")).version(randomPositiveLong()).build();\n+\n+ final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER);\n+ zenPingA.start(new PingContextProvider() {\n+ @Override\n+ public DiscoveryNodes nodes() {\n+ return DiscoveryNodes.builder().add(handleA.node).add(handleB.node).localNodeId(\"UZP_A\").build();\n+ }\n+\n+ @Override\n+ public ClusterState clusterState() {\n+ return ClusterState.builder(state).blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)).build();\n+ }\n+ });\n+ closeables.push(zenPingA);\n+\n+ // Node B doesn't know about A!\n+ TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER);\n+ zenPingB.start(new PingContextProvider() {\n+ @Override\n+ public DiscoveryNodes nodes() {\n+ return DiscoveryNodes.builder().add(handleB.node).localNodeId(\"UZP_B\").build();\n+ }\n+\n+ @Override\n+ public ClusterState clusterState() {\n+ return state;\n+ }\n+ });\n+ closeables.push(zenPingB);\n+\n+ {\n+ logger.info(\"pinging from UZP_A so UZP_B will learn about it\");\n+ Collection<ZenPing.PingResponse> pingResponses = zenPingA.pingAndWait().toList();\n+ assertThat(pingResponses.size(), equalTo(1));\n+ ZenPing.PingResponse ping = pingResponses.iterator().next();\n+ assertThat(ping.node().getId(), equalTo(\"UZP_B\"));\n+ assertThat(ping.getClusterStateVersion(), equalTo(state.version()));\n+ }\n+ {\n+ logger.info(\"pinging from UZP_B\");\n+ Collection<ZenPing.PingResponse> pingResponses = zenPingB.pingAndWait().toList();\n+ assertThat(pingResponses.size(), equalTo(1));\n+ ZenPing.PingResponse ping = pingResponses.iterator().next();\n+ assertThat(ping.node().getId(), equalTo(\"UZP_A\"));\n+ assertThat(ping.getClusterStateVersion(), equalTo(-1L)); // A has a block\n+ }\n+ }\n+\n public void testInvalidHosts() throws InterruptedException {\n final Logger logger = mock(Logger.class);\n final NetworkService networkService = new NetworkService(Settings.EMPTY, Collections.emptyList());\n@@ -529,72 +704,65 @@ public void testInvalidHosts() throws InterruptedException {\n final TransportService transportService =\n new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);\n closeables.push(transportService);\n- final AtomicInteger idGenerator = new AtomicInteger();\n- final List<DiscoveryNode> discoveryNodes = UnicastZenPing.resolveDiscoveryNodes(\n+ final List<DiscoveryNode> discoveryNodes = TestUnicastZenPing.resolveHostsLists(\n executorService,\n logger,\n Arrays.asList(\"127.0.0.1:9300:9300\", \"127.0.0.1:9301\"),\n 1,\n transportService,\n- () -> Integer.toString(idGenerator.incrementAndGet()),\n+ \"test_\",\n TimeValue.timeValueSeconds(1));\n assertThat(discoveryNodes, hasSize(1)); // only one of the two is valid and will be used\n assertThat(discoveryNodes.get(0).getAddress().getAddress(), equalTo(\"127.0.0.1\"));\n assertThat(discoveryNodes.get(0).getAddress().getPort(), equalTo(9301));\n verify(logger).warn(eq(\"failed to resolve host [127.0.0.1:9300:9300]\"), 
Matchers.any(ExecutionException.class));\n }\n \n- // assert that we tried to ping each of the configured nodes at least once\n- private void assertCountersMoreThan(final NetworkHandle that, final NetworkHandle...handles) {\n- final HashMap<TransportAddress, Integer> moreThan = new HashMap<>();\n- for (final NetworkHandle handle : handles) {\n- assert handle != that;\n- moreThan.put(handle.address, 0);\n- }\n- assertCountersMoreThan(moreThan, that, handles);\n+ private void assertPingCount(final NetworkHandle fromNode, final NetworkHandle toNode, int expectedCount) {\n+ final AtomicInteger counter = fromNode.counters.getOrDefault(toNode.address, new AtomicInteger());\n+ final String onNodeName = fromNode.node.getName();\n+ assertNotNull(\"handle for [\" + onNodeName + \"] has no 'expected' counter\", counter);\n+ final String forNodeName = toNode.node.getName();\n+ assertThat(\"node [\" + onNodeName + \"] ping count to [\" + forNodeName + \"] is unexpected\",\n+ counter.get(), equalTo(expectedCount));\n }\n \n- private void assertCountersMoreThan(\n- final Map<TransportAddress, Integer> moreThan,\n- final NetworkHandle that,\n- final NetworkHandle... handles) {\n- for (final NetworkHandle handle : handles) {\n- assert handle != that;\n- assertThat(that.counters.get(handle.address).get(), greaterThan(moreThan.get(handle.address)));\n- }\n+ private NetworkHandle startServices(\n+ final Settings settings,\n+ final ThreadPool threadPool,\n+ final String nodeId,\n+ final Version version,\n+ final BiFunction<Settings, Version, Transport> supplier) {\n+ return startServices(settings, threadPool, nodeId, version, supplier, emptySet());\n+\n }\n \n private NetworkHandle startServices(\n final Settings settings,\n final ThreadPool threadPool,\n final String nodeId,\n final Version version,\n- final BiFunction<Settings, Version, Transport> supplier) {\n- final Transport transport = supplier.apply(settings, version);\n- final TransportService transportService =\n- new MockTransportService(settings, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);\n+ final BiFunction<Settings, Version, Transport> supplier,\n+ final Set<Role> nodeRoles) {\n+ final Settings nodeSettings = Settings.builder().put(settings)\n+ .put(\"node.name\", nodeId)\n+ .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), \"internal:discovery/zen/unicast\")\n+ .build();\n+ final Transport transport = supplier.apply(nodeSettings, version);\n+ final MockTransportService transportService =\n+ new MockTransportService(nodeSettings, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);\n transportService.start();\n transportService.acceptIncomingRequests();\n final ConcurrentMap<TransportAddress, AtomicInteger> counters = ConcurrentCollections.newConcurrentMap();\n- transportService.addConnectionListener(new TransportConnectionListener() {\n-\n- @Override\n- public void onNodeConnected(DiscoveryNode node) {\n- }\n-\n+ transportService.addTracer(new MockTransportService.Tracer() {\n @Override\n- public void onConnectionOpened(DiscoveryNode node) {\n+ public void requestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) {\n counters.computeIfAbsent(node.getAddress(), k -> new AtomicInteger());\n counters.get(node.getAddress()).incrementAndGet();\n }\n-\n- @Override\n- public void onNodeDisconnected(DiscoveryNode node) {\n- }\n-\n });\n final DiscoveryNode node =\n- new DiscoveryNode(nodeId, transportService.boundAddress().publishAddress(), emptyMap(), 
emptySet(), version);\n+ new DiscoveryNode(nodeId, nodeId, transportService.boundAddress().publishAddress(), emptyMap(), nodeRoles, version);\n transportService.setLocalNode(node);\n return new NetworkHandle(transport.boundAddress().publishAddress(), transportService, node, counters);\n }\n@@ -616,7 +784,123 @@ public NetworkHandle(\n this.node = discoveryNode;\n this.counters = counters;\n }\n+ }\n+\n+ private static class TestUnicastZenPing extends UnicastZenPing {\n+\n+ public TestUnicastZenPing(Settings settings, ThreadPool threadPool, NetworkHandle networkHandle,\n+ UnicastHostsProvider unicastHostsProvider) {\n+ super(Settings.builder().put(\"node.name\", networkHandle.node.getName()).put(settings).build(),\n+ threadPool, networkHandle.transportService, unicastHostsProvider);\n+ }\n+\n+ volatile CountDownLatch allTasksCompleted;\n+ volatile AtomicInteger pendingTasks;\n+\n+ PingCollection pingAndWait() throws ExecutionException, InterruptedException {\n+ allTasksCompleted = new CountDownLatch(1);\n+ pendingTasks = new AtomicInteger();\n+ // make the three sending rounds to come as started\n+ markTaskAsStarted(\"send pings\");\n+ markTaskAsStarted(\"send pings\");\n+ markTaskAsStarted(\"send pings\");\n+ final CompletableFuture<PingCollection> response = new CompletableFuture<>();\n+ try {\n+ ping(response::complete, TimeValue.timeValueMillis(1), TimeValue.timeValueSeconds(1));\n+ } catch (Exception ex) {\n+ response.completeExceptionally(ex);\n+ }\n+ return response.get();\n+ }\n+\n+ @Override\n+ protected void finishPingingRound(PingingRound pingingRound) {\n+ // wait for all activity to finish before closing\n+ try {\n+ allTasksCompleted.await();\n+ } catch (InterruptedException e) {\n+ // ok, finish anyway\n+ }\n+ super.finishPingingRound(pingingRound);\n+ }\n+\n+ @Override\n+ protected void sendPings(TimeValue timeout, PingingRound pingingRound) {\n+ super.sendPings(timeout, pingingRound);\n+ markTaskAsCompleted(\"send pings\");\n+ }\n+\n+ @Override\n+ protected void submitToExecutor(AbstractRunnable abstractRunnable) {\n+ markTaskAsStarted(\"executor runnable\");\n+ super.submitToExecutor(new AbstractRunnable() {\n+ @Override\n+ public void onRejection(Exception e) {\n+ try {\n+ super.onRejection(e);\n+ } finally {\n+ markTaskAsCompleted(\"executor runnable (rejected)\");\n+ }\n+ }\n+\n+ @Override\n+ public void onAfter() {\n+ markTaskAsCompleted(\"executor runnable\");\n+ }\n+\n+ @Override\n+ protected void doRun() throws Exception {\n+ abstractRunnable.run();\n+ }\n+\n+ @Override\n+ public void onFailure(Exception e) {\n+ // we shouldn't really end up here.\n+ throw new AssertionError(\"unexpected error\", e);\n+ }\n+ });\n+ }\n+\n+ private void markTaskAsStarted(String task) {\n+ logger.trace(\"task [{}] started. count [{}]\", task, pendingTasks.incrementAndGet());\n+ }\n \n+ private void markTaskAsCompleted(String task) {\n+ final int left = pendingTasks.decrementAndGet();\n+ logger.trace(\"task [{}] completed. 
count [{}]\", task, left);\n+ if (left == 0) {\n+ allTasksCompleted.countDown();\n+ }\n+ }\n+\n+ @Override\n+ protected TransportResponseHandler<UnicastPingResponse> getPingResponseHandler(PingingRound pingingRound, DiscoveryNode node) {\n+ markTaskAsStarted(\"ping [\" + node + \"]\");\n+ TransportResponseHandler<UnicastPingResponse> original = super.getPingResponseHandler(pingingRound, node);\n+ return new TransportResponseHandler<UnicastPingResponse>() {\n+ @Override\n+ public UnicastPingResponse newInstance() {\n+ return original.newInstance();\n+ }\n+\n+ @Override\n+ public void handleResponse(UnicastPingResponse response) {\n+ original.handleResponse(response);\n+ markTaskAsCompleted(\"ping [\" + node + \"]\");\n+ }\n+\n+ @Override\n+ public void handleException(TransportException exp) {\n+ original.handleException(exp);\n+ markTaskAsCompleted(\"ping [\" + node + \"] (error)\");\n+ }\n+\n+ @Override\n+ public String executor() {\n+ return original.executor();\n+ }\n+ };\n+ }\n }\n \n }", "filename": "core/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java", "status": "modified" }, { "diff": "@@ -68,7 +68,7 @@ public void testPingCollection() {\n Collections.shuffle(pings, random());\n \n ZenPing.PingCollection collection = new ZenPing.PingCollection();\n- collection.addPings(pings);\n+ pings.forEach(collection::addPing);\n \n List<ZenPing.PingResponse> aggregate = collection.toList();\n ", "filename": "core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java", "status": "modified" }, { "diff": "@@ -208,8 +208,8 @@ public long serverOpen() {\n \n @Override\n public NodeChannels getConnection(DiscoveryNode node) {\n- return new NodeChannels(node, new Object[ConnectionProfile.LIGHT_PROFILE.getNumConnections()],\n- ConnectionProfile.LIGHT_PROFILE);\n+ return new NodeChannels(node, new Object[MockTcpTransport.LIGHT_PROFILE.getNumConnections()],\n+ MockTcpTransport.LIGHT_PROFILE);\n }\n };\n DiscoveryNode node = new DiscoveryNode(\"foo\", buildNewFakeTransportAddress(), Version.CURRENT);", "filename": "core/src/test/java/org/elasticsearch/transport/TCPTransportTests.java", "status": "modified" }, { "diff": "@@ -113,24 +113,14 @@ public void testConnectToNodeLight() throws IOException {\n emptyMap(),\n emptySet(),\n Version.CURRENT.minimumCompatibilityVersion());\n- try (Transport.Connection connection = handleA.transportService.openConnection(discoveryNode, ConnectionProfile.LIGHT_PROFILE)){\n+ try (Transport.Connection connection = handleA.transportService.openConnection(discoveryNode, MockTcpTransport.LIGHT_PROFILE)){\n DiscoveryNode connectedNode = handleA.transportService.handshake(connection, timeout);\n assertNotNull(connectedNode);\n // the name and version should be updated\n assertEquals(connectedNode.getName(), \"TS_B\");\n assertEquals(connectedNode.getVersion(), handleB.discoveryNode.getVersion());\n assertFalse(handleA.transportService.nodeConnected(discoveryNode));\n }\n-\n- DiscoveryNode connectedNode =\n- handleA.transportService.connectToNodeAndHandshake(discoveryNode, timeout);\n- assertNotNull(connectedNode);\n-\n- // the name and version should be updated\n- assertEquals(connectedNode.getName(), \"TS_B\");\n- assertEquals(connectedNode.getVersion(), handleB.discoveryNode.getVersion());\n- assertTrue(handleA.transportService.nodeConnected(discoveryNode));\n-\n }\n \n public void testMismatchedClusterName() {\n@@ -145,7 +135,7 @@ public void testMismatchedClusterName() {\n Version.CURRENT.minimumCompatibilityVersion());\n 
IllegalStateException ex = expectThrows(IllegalStateException.class, () -> {\n try (Transport.Connection connection = handleA.transportService.openConnection(discoveryNode,\n- ConnectionProfile.LIGHT_PROFILE)) {\n+ MockTcpTransport.LIGHT_PROFILE)) {\n handleA.transportService.handshake(connection, timeout);\n }\n });\n@@ -166,7 +156,7 @@ public void testIncompatibleVersions() {\n Version.CURRENT.minimumCompatibilityVersion());\n IllegalStateException ex = expectThrows(IllegalStateException.class, () -> {\n try (Transport.Connection connection = handleA.transportService.openConnection(discoveryNode,\n- ConnectionProfile.LIGHT_PROFILE)) {\n+ MockTcpTransport.LIGHT_PROFILE)) {\n handleA.transportService.handshake(connection, timeout);\n }\n });", "filename": "core/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java", "status": "modified" }, { "diff": "@@ -43,7 +43,7 @@\n import java.util.stream.Stream;\n \n import static org.elasticsearch.discovery.zen.UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT;\n-import static org.elasticsearch.discovery.zen.UnicastZenPing.resolveDiscoveryNodes;\n+import static org.elasticsearch.discovery.zen.UnicastZenPing.resolveHostsLists;\n \n /**\n * An implementation of {@link UnicastHostsProvider} that reads hosts/ports\n@@ -97,13 +97,13 @@ public List<DiscoveryNode> buildDynamicNodes() {\n \n final List<DiscoveryNode> discoNodes = new ArrayList<>();\n try {\n- discoNodes.addAll(resolveDiscoveryNodes(\n+ discoNodes.addAll(resolveHostsLists(\n executorService,\n logger,\n hostsList,\n 1,\n transportService,\n- () -> UNICAST_HOST_PREFIX + nodeIdGenerator.incrementAndGet() + \"#\",\n+ UNICAST_HOST_PREFIX,\n resolveTimeout));\n } catch (InterruptedException e) {\n throw new RuntimeException(e);", "filename": "plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java", "status": "modified" }, { "diff": "@@ -33,9 +33,7 @@\n import org.elasticsearch.transport.MockTcpTransport;\n import org.elasticsearch.transport.TransportService;\n import org.junit.After;\n-import org.junit.AfterClass;\n import org.junit.Before;\n-import org.junit.BeforeClass;\n \n import java.io.BufferedWriter;\n import java.io.IOException;\n@@ -44,7 +42,6 @@\n import java.util.Arrays;\n import java.util.Collections;\n import java.util.List;\n-import java.util.concurrent.Executor;\n import java.util.concurrent.ExecutorService;\n import java.util.concurrent.Executors;\n \n@@ -99,13 +96,13 @@ public void testBuildDynamicNodes() throws Exception {\n assertEquals(hostEntries.size() - 1, nodes.size()); // minus 1 because we are ignoring the first line that's a comment\n assertEquals(\"192.168.0.1\", nodes.get(0).getAddress().getAddress());\n assertEquals(9300, nodes.get(0).getAddress().getPort());\n- assertEquals(UNICAST_HOST_PREFIX + \"1#\", nodes.get(0).getId());\n+ assertEquals(UNICAST_HOST_PREFIX + \"192.168.0.1_0#\", nodes.get(0).getId());\n assertEquals(\"192.168.0.2\", nodes.get(1).getAddress().getAddress());\n assertEquals(9305, nodes.get(1).getAddress().getPort());\n- assertEquals(UNICAST_HOST_PREFIX + \"2#\", nodes.get(1).getId());\n+ assertEquals(UNICAST_HOST_PREFIX + \"192.168.0.2:9305_0#\", nodes.get(1).getId());\n assertEquals(\"255.255.23.15\", nodes.get(2).getAddress().getAddress());\n assertEquals(9300, nodes.get(2).getAddress().getPort());\n- assertEquals(UNICAST_HOST_PREFIX + \"3#\", nodes.get(2).getId());\n+ assertEquals(UNICAST_HOST_PREFIX + \"255.255.23.15_0#\", nodes.get(2).getId());\n }\n \n 
public void testEmptyUnicastHostsFile() throws Exception {", "filename": "plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java", "status": "modified" }, { "diff": "@@ -28,10 +28,9 @@\n import org.elasticsearch.discovery.zen.ZenPing;\n \n import java.util.HashMap;\n-import java.util.List;\n import java.util.Map;\n import java.util.Set;\n-import java.util.stream.Collectors;\n+import java.util.function.Consumer;\n \n /**\n * A {@link ZenPing} implementation which returns results based on an static in-memory map. This allows pinging\n@@ -62,7 +61,7 @@ public void start(PingContextProvider contextProvider) {\n }\n \n @Override\n- public void ping(PingListener listener, TimeValue timeout) {\n+ public void ping(Consumer<PingCollection> resultsConsumer, TimeValue timeout) {\n logger.info(\"pinging using mock zen ping\");\n synchronized (activeNodesPerCluster) {\n Set<MockZenPing> activeNodes = getActiveNodesForCurrentCluster();\n@@ -76,11 +75,12 @@ public void ping(PingListener listener, TimeValue timeout) {\n activeNodes = getActiveNodesForCurrentCluster();\n }\n lastDiscoveredPings = activeNodes;\n- List<PingResponse> responseList = activeNodes.stream()\n+ PingCollection pingCollection = new PingCollection();\n+ activeNodes.stream()\n .filter(p -> p != this) // remove this as pings are not expected to return the local node\n .map(MockZenPing::getPingResponse)\n- .collect(Collectors.toList());\n- listener.onPing(responseList);\n+ .forEach(pingCollection::addPing);\n+ resultsConsumer.accept(pingCollection);\n }\n }\n ", "filename": "test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java", "status": "modified" }, { "diff": "@@ -29,9 +29,7 @@\n import org.elasticsearch.action.ActionListenerResponseHandler;\n import org.elasticsearch.action.support.PlainActionFuture;\n import org.elasticsearch.cluster.node.DiscoveryNode;\n-import org.elasticsearch.common.io.stream.InputStreamStreamInput;\n import org.elasticsearch.common.io.stream.NamedWriteableRegistry;\n-import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n import org.elasticsearch.common.logging.Loggers;\n@@ -54,8 +52,6 @@\n import org.junit.Before;\n \n import java.io.IOException;\n-import java.io.InputStream;\n-import java.io.OutputStream;\n import java.io.UncheckedIOException;\n import java.net.InetAddress;\n import java.net.InetSocketAddress;\n@@ -1358,7 +1354,7 @@ public void handleException(TransportException exp) {\n // all is well\n }\n \n- try (Transport.Connection connection = serviceB.openConnection(nodeA, ConnectionProfile.LIGHT_PROFILE)){\n+ try (Transport.Connection connection = serviceB.openConnection(nodeA, MockTcpTransport.LIGHT_PROFILE)){\n serviceB.handshake(connection, 100);\n fail(\"exception should be thrown\");\n } catch (IllegalStateException e) {\n@@ -1416,7 +1412,7 @@ public void handleException(TransportException exp) {\n // all is well\n }\n \n- try (Transport.Connection connection = serviceB.openConnection(nodeA, ConnectionProfile.LIGHT_PROFILE)){\n+ try (Transport.Connection connection = serviceB.openConnection(nodeA, MockTcpTransport.LIGHT_PROFILE)){\n serviceB.handshake(connection, 100);\n fail(\"exception should be thrown\");\n } catch (IllegalStateException e) {", "filename": "test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java", "status": "modified" }, { "diff": 
"@@ -66,6 +66,23 @@\n */\n public class MockTcpTransport extends TcpTransport<MockTcpTransport.MockChannel> {\n \n+ /**\n+ * A pre-built light connection profile that shares a single connection across all\n+ * types.\n+ */\n+ public static final ConnectionProfile LIGHT_PROFILE;\n+\n+ static {\n+ ConnectionProfile.Builder builder = new ConnectionProfile.Builder();\n+ builder.addConnections(1,\n+ TransportRequestOptions.Type.BULK,\n+ TransportRequestOptions.Type.PING,\n+ TransportRequestOptions.Type.RECOVERY,\n+ TransportRequestOptions.Type.REG,\n+ TransportRequestOptions.Type.STATE);\n+ LIGHT_PROFILE = builder.build();\n+ }\n+\n private final ExecutorService executor;\n private final Version mockVersion;\n \n@@ -159,7 +176,7 @@ private void readMessage(MockChannel mockChannel, StreamInput input) throws IOEx\n @Override\n protected NodeChannels connectToChannels(DiscoveryNode node, ConnectionProfile profile) throws IOException {\n final MockChannel[] mockChannels = new MockChannel[1];\n- final NodeChannels nodeChannels = new NodeChannels(node, mockChannels, ConnectionProfile.LIGHT_PROFILE); // we always use light here\n+ final NodeChannels nodeChannels = new NodeChannels(node, mockChannels, LIGHT_PROFILE); // we always use light here\n boolean success = false;\n final Socket socket = new Socket();\n try {", "filename": "test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java", "status": "modified" } ] }
{ "body": "* Elasticsearch versions: \r\n * 5.1.1 (official docker image)\r\n * master\r\n\r\nBased on https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#query-string-syntax it should be possible to use * in the field names if it is properly escaped. The following works as expected:\r\n\r\n```\r\nGET /metricbeat-*/_search\r\n{\r\n \"query\": {\r\n \"query_string\": {\r\n \"fields\": [\"system.*.cores\"],\r\n \"query\": \"4\"\r\n }\r\n }\r\n}\r\n```\r\n\r\nThe correct results are returned. Rewriting this with only a query as below fails:\r\n\r\n```\r\nGET /metricbeat-*/_search\r\n{\r\n \"query\": {\r\n \"query_string\": {\r\n \"query\": \"system.\\*.cores:4\"\r\n }\r\n }\r\n}\r\n```\r\n\r\nThis returns the following error:\r\n```\r\n{\r\n \"error\": {\r\n \"root_cause\": [\r\n {\r\n \"type\": \"json_parse_exception\",\r\n \"reason\": \"Unrecognized character escape '*' (code 42)\\n at [Source: org.elasticsearch.transport.netty4.ByteBufStreamInput@39e64a6c; line: 4, column: 26]\"\r\n }\r\n ],\r\n \"type\": \"json_parse_exception\",\r\n \"reason\": \"Unrecognized character escape '*' (code 42)\\n at [Source: org.elasticsearch.transport.netty4.ByteBufStreamInput@39e64a6c; line: 4, column: 26]\"\r\n },\r\n \"status\": 500\r\n}\r\n```\r\n\r\nTo tests is it is best to run metricbeat with its default configuration and load data into elasticsearch.", "comments": [ { "body": "Since it's JSON you'll need to use double backslash. The first one is for JSON and the second one is for the query_string query. \r\n\r\nSo this:\r\n````\r\nGET /metricbeat-*/_search\r\n{\r\n \"query\": {\r\n \"query_string\": {\r\n \"query\": \"system.\\\\*.cores:4\"\r\n }\r\n }\r\n}\r\n````\r\n... works.", "created_at": "2016-12-19T10:40:07Z" }, { "body": "I opened a PR to make it explicit in the docs https://github.com/elastic/elasticsearch/pull/22257", "created_at": "2016-12-19T10:41:50Z" }, { "body": "@jimczi I can confirm it works with `\\\\`\r\n@jpountz Thanks for the docs fix.", "created_at": "2016-12-19T11:31:35Z" } ], "number": 22255, "title": "Wildcard in query_string query returns json_parse_exception" }
{ "body": "Relates #22255\r\n", "number": 22257, "review_comments": [], "title": "Be explicit about the fact backslashes need to be escaped." }
{ "commits": [ { "message": "Be explicit about the fact backslashes need to be escaped.\n\nRelates #22255" } ], "files": [ { "diff": "@@ -197,7 +197,24 @@ GET /_search\n \n Another option is to provide the wildcard fields search in the query\n string itself (properly escaping the `*` sign), for example:\n-`city.\\*:something`.\n+`city.\\*:something`:\n+\n+[source,js]\n+--------------------------------------------------\n+GET /_search\n+{\n+ \"query\": {\n+ \"query_string\" : {\n+ \"query\" : \"city.\\\\*:(this AND that OR thus)\",\n+ \"use_dis_max\" : true\n+ }\n+ }\n+}\n+--------------------------------------------------\n+// CONSOLE\n+\n+NOTE: Since `\\` (backslash) is a special character in json strings, it needs to\n+be escaped, hence the two backslashes in the above `query_string`.\n \n When running the `query_string` query against multiple fields, the\n following additional parameters are allowed:", "filename": "docs/reference/query-dsl/query-string-query.asciidoc", "status": "modified" } ] }
{ "body": "This pre-built token filter is inconsistent with other pre-built token filters since it includes `filter` in its name while the other ones don't.", "comments": [ { "body": "Hai! Can I kindly take up this issue and submit a PR?", "created_at": "2017-01-21T08:15:00Z" }, { "body": "Thanks for showing interest in contributing to Elasticsearch.\r\n\r\nHere's a guide to how to go about it: https://github.com/elastic/elasticsearch/blob/master/CONTRIBUTING.md", "created_at": "2017-01-23T18:31:13Z" }, { "body": "Should this issue be closed after the PR ?", "created_at": "2017-08-21T08:47:52Z" } ], "number": 21978, "title": "Rename `delimiter_payload_filter` to `delimiter_payload`." }
{ "body": "Closes #21978 ", "number": 22242, "review_comments": [], "title": "The `delimited_payload_filter` token filter has been renamed to `delimited_payload`" }
{ "commits": [ { "message": "The `delimited_payload_filter` token filter has been renamed to `delimited_payload`" } ], "files": [ { "diff": "@@ -223,7 +223,7 @@ private NamedRegistry<AnalysisProvider<TokenFilterFactory>> setupTokenFilters(Li\n tokenFilters.register(\"snowball\", SnowballTokenFilterFactory::new);\n tokenFilters.register(\"stemmer\", StemmerTokenFilterFactory::new);\n tokenFilters.register(\"word_delimiter\", WordDelimiterTokenFilterFactory::new);\n- tokenFilters.register(\"delimited_payload_filter\", DelimitedPayloadTokenFilterFactory::new);\n+ tokenFilters.register(\"delimited_payload\", DelimitedPayloadTokenFilterFactory::new);\n tokenFilters.register(\"elision\", ElisionTokenFilterFactory::new);\n tokenFilters.register(\"keep\", requriesAnalysisSettings(KeepWordFilterFactory::new));\n tokenFilters.register(\"keep_types\", requriesAnalysisSettings(KeepTypesFilterFactory::new));", "filename": "core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java", "status": "modified" }, { "diff": "@@ -431,7 +431,7 @@ public TokenStream create(TokenStream tokenStream, Version version) {\n }\n },\n \n- DELIMITED_PAYLOAD_FILTER(CachingStrategy.ONE) {\n+ DELIMITED_PAYLOAD(CachingStrategy.ONE) {\n @Override\n public TokenStream create(TokenStream tokenStream, Version version) {\n return new DelimitedPayloadTokenFilter(tokenStream, DelimitedPayloadTokenFilterFactory.DEFAULT_DELIMITER, DelimitedPayloadTokenFilterFactory.DEFAULT_ENCODER);", "filename": "core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java", "status": "modified" }, { "diff": "@@ -420,10 +420,10 @@ public void testRandomPayloadWithDelimitedPayloadTokenFilter() throws IOExceptio\n Settings.builder()\n .put(indexSettings())\n .put(\"index.analysis.analyzer.payload_test.tokenizer\", \"whitespace\")\n- .putArray(\"index.analysis.analyzer.payload_test.filter\", \"my_delimited_payload_filter\")\n- .put(\"index.analysis.filter.my_delimited_payload_filter.delimiter\", delimiter)\n- .put(\"index.analysis.filter.my_delimited_payload_filter.encoding\", encodingString)\n- .put(\"index.analysis.filter.my_delimited_payload_filter.type\", \"delimited_payload_filter\")));\n+ .putArray(\"index.analysis.analyzer.payload_test.filter\", \"my_delimited_payload\")\n+ .put(\"index.analysis.filter.my_delimited_payload.delimiter\", delimiter)\n+ .put(\"index.analysis.filter.my_delimited_payload.encoding\", encodingString)\n+ .put(\"index.analysis.filter.my_delimited_payload.type\", \"delimited_payload\")));\n \n client().prepareIndex(\"test\", \"type1\", Integer.toString(1))\n .setSource(jsonBuilder().startObject().field(\"field\", queryString).endObject()).execute().actionGet();", "filename": "core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java", "status": "modified" }, { "diff": "@@ -539,7 +539,7 @@ void initTestData() throws InterruptedException, ExecutionException, IOException\n .putArray(\"index.analysis.analyzer.payload_int.filter\", \"delimited_int\")\n .put(\"index.analysis.filter.delimited_int.delimiter\", \"|\")\n .put(\"index.analysis.filter.delimited_int.encoding\", \"int\")\n- .put(\"index.analysis.filter.delimited_int.type\", \"delimited_payload_filter\")));\n+ .put(\"index.analysis.filter.delimited_int.type\", \"delimited_payload\")));\n indexRandom(true, client().prepareIndex(\"test\", \"type1\", \"1\").setSource(\"int_payload_field\", \"a|1 b|2 b|3 c|4 d \"), client()\n .prepareIndex(\"test\", \"type1\", \"2\").setSource(\"int_payload_field\", \"b|1 b|2 c|3 d|4 a 
\"),\n client().prepareIndex(\"test\", \"type1\", \"3\").setSource(\"int_payload_field\", \"b|1 c|2 d|3 a|4 b \"));\n@@ -807,17 +807,17 @@ public void testAllExceptPosAndOffset() throws Exception {\n .putArray(\"index.analysis.analyzer.payload_float.filter\", \"delimited_float\")\n .put(\"index.analysis.filter.delimited_float.delimiter\", \"|\")\n .put(\"index.analysis.filter.delimited_float.encoding\", \"float\")\n- .put(\"index.analysis.filter.delimited_float.type\", \"delimited_payload_filter\")\n+ .put(\"index.analysis.filter.delimited_float.type\", \"delimited_payload\")\n .put(\"index.analysis.analyzer.payload_string.tokenizer\", \"whitespace\")\n .putArray(\"index.analysis.analyzer.payload_string.filter\", \"delimited_string\")\n .put(\"index.analysis.filter.delimited_string.delimiter\", \"|\")\n .put(\"index.analysis.filter.delimited_string.encoding\", \"identity\")\n- .put(\"index.analysis.filter.delimited_string.type\", \"delimited_payload_filter\")\n+ .put(\"index.analysis.filter.delimited_string.type\", \"delimited_payload\")\n .put(\"index.analysis.analyzer.payload_int.tokenizer\", \"whitespace\")\n .putArray(\"index.analysis.analyzer.payload_int.filter\", \"delimited_int\")\n .put(\"index.analysis.filter.delimited_int.delimiter\", \"|\")\n .put(\"index.analysis.filter.delimited_int.encoding\", \"int\")\n- .put(\"index.analysis.filter.delimited_int.type\", \"delimited_payload_filter\")\n+ .put(\"index.analysis.filter.delimited_int.type\", \"delimited_payload\")\n .put(\"index.number_of_shards\", 1)));\n indexRandom(true, client().prepareIndex(\"test\", \"type1\", \"1\").setSource(\"float_payload_field\", \"a|1 b|2 a|3 b \"), client()\n .prepareIndex(\"test\", \"type1\", \"2\").setSource(\"string_payload_field\", \"a|a b|b a|a b \"),", "filename": "core/src/test/java/org/elasticsearch/script/IndexLookupIT.java", "status": "modified" }, { "diff": "@@ -1,7 +1,7 @@\n [[analysis-delimited-payload-tokenfilter]]\n === Delimited Payload Token Filter\n \n-Named `delimited_payload_filter`. Splits tokens into tokens and payload whenever a delimiter character is found.\n+Named `delimited_payload`. 
Splits tokens into tokens and payload whenever a delimiter character is found.\n \n Example: \"the|1 quick|2 fox|3\" is split by default into tokens `the`, `quick`, and `fox` with payloads `1`, `2`, and `3` respectively.\n ", "filename": "docs/reference/analysis/tokenfilters/delimited-payload-tokenfilter.asciidoc", "status": "modified" }, { "diff": "@@ -35,6 +35,7 @@ way to reindex old indices is to use the `reindex` API.\n * <<breaking_60_plugins_changes>>\n * <<breaking_60_indices_changes>>\n * <<breaking_60_scripting_changes>>\n+* <<breaking_60_analysis_changes>>\n \n include::migrate_6_0/cat.asciidoc[]\n \n@@ -57,3 +58,5 @@ include::migrate_6_0/plugins.asciidoc[]\n include::migrate_6_0/indices.asciidoc[]\n \n include::migrate_6_0/scripting.asciidoc[]\n+\n+include::migrate_6_0/analysis.asciidoc[]", "filename": "docs/reference/migration/migrate_6_0.asciidoc", "status": "modified" }, { "diff": "@@ -0,0 +1,6 @@\n+[[breaking_60_analysis_changes]]\n+=== Analysis changes\n+\n+==== `delimited_payload_filter` is now `delimited_payload`\n+\n+The `delimited_payload_filter` token filter has been renamed to `delimited_payload`.", "filename": "docs/reference/migration/migrate_6_0/analysis.asciidoc", "status": "added" }, { "diff": "@@ -299,7 +299,7 @@ private static String toCamelCase(String s) {\n case STEMMER:\n luceneFactoryClazz = PorterStemFilterFactory.class;\n break;\n- case DELIMITED_PAYLOAD_FILTER:\n+ case DELIMITED_PAYLOAD:\n luceneFactoryClazz = org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilterFactory.class;\n break;\n case LIMIT:", "filename": "test/framework/src/main/java/org/elasticsearch/AnalysisFactoryTestCase.java", "status": "modified" } ] }
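With the rename applied, analysis settings must reference the filter type as `delimited_payload` rather than `delimited_payload_filter`. A minimal sketch of such a settings fragment, built the same way the updated tests do; it assumes the `Settings` builder API shown in the diffs, and the analyzer and filter names (`payload_test`, `my_delimited_payload`) are invented for illustration.

```java
import org.elasticsearch.common.settings.Settings;

public class DelimitedPayloadSettingsSketch {
    public static Settings payloadAnalyzerSettings() {
        return Settings.builder()
            .put("index.analysis.analyzer.payload_test.tokenizer", "whitespace")
            .putArray("index.analysis.analyzer.payload_test.filter", "my_delimited_payload")
            .put("index.analysis.filter.my_delimited_payload.delimiter", "|")
            .put("index.analysis.filter.my_delimited_payload.encoding", "int")
            // "delimited_payload" is the new name; "delimited_payload_filter" is the old one.
            .put("index.analysis.filter.my_delimited_payload.type", "delimited_payload")
            .build();
    }
}
```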
{ "body": "My goal was to have a dynamically created date field with the ignore_malformed option set. However, when I have both dynamic_date_formats and dynamic_templates for the date type, the dynamic_date_formats is ignored.\n\nSo let's create our index..\n\n```\ncurl -XPOST localhost:9200/test -d '{\n \"mappings\": {\n \"test\": {\n \"dynamic_date_formats\": [\n \"yyyy-MM-dd\"\n ],\n \"dynamic_templates\": [\n {\n \"dates_ignore_malformed\": {\n \"path_match\": \"*\",\n \"match_mapping_type\": \"date\",\n \"mapping\": {\n \"ignore_malformed\": true\n }\n }\n }\n ]\n }\n }\n}'\n```\n\nAnd add some data..\n\n```\ncurl -XPOST localhost:9200/test/test/1 -d '{\n \"something\": \"2014-01-05\"\n}'\n```\n\nAnd now get the mappings back..\n\n```\ncurl -XGET localhost:9200/test/_mappings\n->\n{\n \"test\": {\n \"mappings\": {\n \"test\": {\n \"dynamic_date_formats\": [\n \"yyyy-MM-dd\"\n ],\n \"dynamic_templates\": [\n {\n \"dates_ignore_malformed\": {\n \"mapping\": {\n \"ignore_malformed\": true\n },\n \"match_mapping_type\": \"date\",\n \"path_match\": \"*\"\n }\n }\n ],\n \"properties\": {\n \"something\": {\n \"type\": \"date\",\n \"ignore_malformed\": true,\n \"format\": \"dateOptionalTime\"\n }\n }\n }\n }\n }\n}\n```\n\nSee how the format for \"something\" is dateOptionalTime? Had I not included the dynamic_templates, that would have been (and should have been) \"yyyy-MM-dd\".\n", "comments": [ { "body": "First Elasticsearch checks the unknown string field against the list of date formats in `dynamic_date_formats` to determine whether the field contains a date or a string.\n\nOnce it decides that it is a date, it applies the mapping found in the templates, in which you don't specify the date format. Thus it uses the default matching format which is `dateOptionalTime`. Why not just specify the format you want in the template mapping? eg:\n\n```\nDELETE test\n\nPOST /test\n{\n \"mappings\": {\n \"test\": {\n \"dynamic_date_formats\": [\n \"yyyy/MM/dd\" \n ],\n \"dynamic_templates\": [\n {\n \"dates_ignore_malformed\": {\n \"path_match\": \"*\",\n \"match_mapping_type\": \"date\",\n \"mapping\": {\n \"format\": \"yyyy/MM/dd\",\n \"ignore_malformed\": true\n }\n }\n }\n ]\n }\n }\n}\n\nPOST /test/test/1\n{\n \"format_one\": \"2014-01-05\"\n}\n\nPOST /test/test/2\n{\n \"format_two\": \"2014/01/05\"\n}\n\nGET _mapping\n```\n\nThis returns:\n\n```\n{\n \"test\": {\n \"mappings\": {\n \"test\": {\n \"dynamic_date_formats\": [\n \"yyyy/MM/dd\"\n ],\n \"dynamic_templates\": [\n {\n \"dates_ignore_malformed\": {\n \"mapping\": {\n \"ignore_malformed\": true,\n \"format\": \"yyyy/MM/dd\"\n },\n \"match_mapping_type\": \"date\",\n \"path_match\": \"*\"\n }\n }\n ],\n \"properties\": {\n \"format_one\": {\n \"type\": \"string\"\n },\n \"format_two\": {\n \"type\": \"date\",\n \"ignore_malformed\": true,\n \"format\": \"yyyy/MM/dd\"\n }\n }\n }\n }\n }\n} \n```\n", "created_at": "2015-01-26T19:56:54Z" }, { "body": "So what if you wanted to use dynamic_date_formats to specify multiple possible formats? Does that prevent you from also using ignore_malformed?\n", "created_at": "2015-01-27T00:25:45Z" }, { "body": "The first time that field is seen, it will be mapped as a date only if it matches one of the formats you listed in `dynamic_date_formats`, otherwise it will be mapped as a string.\n\nOnce the field is mapped, then `ignore_malformed` will allow you to ignore malformed dates later on. But if the first date seen is malformed, then the field will be a string instead. 
\n\nThis is the same way that the mapping would work if you do not specify dynamic_date_formats or templates.\n", "created_at": "2015-01-27T09:19:41Z" }, { "body": "I appreciate you taking the time to help me clear this up, but it still seems like the thing I want to do isn't possible. In my data set, the date might be in one of several formats, and I won't know which one ahead of time. That's why I'm trying to use dynamic_date_formats to specify the possibilities, but I guess my first example was unclear because I only listed one. This is more what I was going for:\n\n```\n{\n \"mappings\": {\n \"test\": {\n \"dynamic_date_formats\": [\n \"yyyy-MM-dd\",\n \"yyyy/MM/dd\"\n ],\n \"dynamic_templates\": [\n {\n \"dates_ignore_malformed\": {\n \"path_match\": \"*\",\n \"match_mapping_type\": \"date\",\n \"mapping\": {\n \"ignore_malformed\": true\n }\n }\n }\n ]\n }\n }\n}\n```\n\nSo whichever format the first date has would be applied to the new field, and the date would be expected in that format from then on. If it came in a different format from then on, ignore_malformed would cause it to be ignored.\n\nI guess that isn't possible with ElasticSearch?\n", "created_at": "2015-01-27T09:51:44Z" }, { "body": "@SergeyTsalkov Actually you're completely right - the `dynamic_date_formats` are not used after adding the mapping to determine the `format` for the date field:\n\n```\nDELETE test\n\nPOST /test\n{\n \"mappings\": {\n \"test\": {\n \"dynamic_date_formats\": [\n \"yyyy-MM-dd\" ,\n \"dd/MM/yyyy\"\n ],\n \"dynamic_templates\": [\n {\n \"dates_ignore_malformed\": {\n \"path_match\": \"*\",\n \"match_mapping_type\": \"date\",\n \"mapping\": {\n \"type\":\"date\"\n }\n }\n }\n ]\n }\n }\n}\n```\n\nThis results in a date field with format `dateOptionalTime`:\n\n```\nPOST /test/test/1\n{\n \"format_one\": \"2014-01-05\"\n}\n```\n\nThis throws an exception indicating that it is ignoring the `dynamic_date_formats`: failed to parse date field [05/01/2014], tried both date format [dateOptionalTime], and timestamp number with locale []\n\n```\nPOST /test/test/2\n{\n \"format_two\": \"05/01/2014\"\n}\n\nGET _mapping\n```\n", "created_at": "2015-01-27T10:16:31Z" }, { "body": "> the dynamic_date_formats are not used after adding the mapping to determine the format for the date field\n\nBut that's the way it should be? As you said before, `dynamic_date_formats` is to determine _what_ is a date. There was then no format put on the dynamic field, and when it tried to parse the value (with the actual field mapping format of `dateOptionalTime`) it failed. I don't think `dynamic_date_formats` should in any ways adjust the dynamic field's format (that is what the dynamic field mapping is for).\n", "created_at": "2015-01-27T15:38:43Z" }, { "body": "@rjernst if you didn't specify any dynamic_date_formats or any templates, then it would check a string against the default list of dynamic date formats and apply the first format that matches to the field.\n\nWith the example above, it uses the specified dynamic date formats to determine whether the field is a date or not, but then it uses the default list of dynamic date formats to determine which format to apply. Instead, it should use the custom list.\n", "created_at": "2015-01-27T18:03:40Z" }, { "body": "This is the way to do it. 
Set your dynamic_date_format, then set the same ones as the value of \"format\" in your dynamic mapping.\n\n```\n \"dynamic_date_formats\": [\n \"yyyy/MM/dd HH:mm:ss Z\",\n \"yyyy/MM/dd Z\",\n \"yyyy/MM/dd HH:mm:ss\",\n \"yyyy/MM/dd\",\n \"yyyy-MM-dd HH:mm:ss Z\",\n \"yyyy-MM-dd Z\",\n \"yyyy-MM-dd HH:mm:ss\",\n \"yyyy-MM-dd\",\n \"MM/dd/yyyy\",\n \"MM-dd-yyyy\"\n ],\n```\n\n```\n \"date_template\": {\n \"match\": \"*\",\n \"match_mapping_type\": \"date\",\n \"mapping\": {\n \"type\": \"date\",\n \"index\": \"not_analyzed\",\n \"format\": \"yyyy/MM/dd HH:mm:ss Z||yyyy/MM/dd Z||yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||MM/dd/yyyy||MM-dd-yyyy\"\n }\n }\n```\n\nThis way, you can add dates of different format and the field doesn't get locked down to a single format.\n", "created_at": "2016-10-13T01:01:51Z" }, { "body": "I'm using Elasticsearch 2.3.1 and the dynamic mapping posted above fails for me:\r\n \"date_template\": {\r\n \"match\": \"*\",\r\n \"match_mapping_type\": \"date\",\r\n \"mapping\": {\r\n \"type\": \"date\",\r\n \"index\": \"not_analyzed\",\r\n \"format\": \"yyyy/MM/dd HH:mm:ss Z||yyyy/MM/dd Z||yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||MM/dd/yyyy||MM-dd-yyyy\"\r\n }\r\n }\r\n\r\nI've also tried the following but it too does not give me the behavior I want. I do not always know the format of my date field for an index before indexing a document. I want the index to be able to accept different formats.\r\n\r\n \"dynamic_date_formats\": [\r\n \"yyyy/MM/dd HH:mm:ss Z\",\r\n \"yyyy/MM/dd Z\",\r\n \"yyyy/MM/dd HH:mm:ss\",\r\n \"yyyy/MM/dd\",\r\n \"yyyy-MM-dd HH:mm:ss Z\",\r\n \"yyyy-MM-dd Z\",\r\n \"yyyy-MM-dd HH:mm:ss\",\r\n \"yyyy-MM-dd\",\r\n \"MM/dd/yyyy\",\r\n \"MM-dd-yyyy\"\r\n ],\r\n \"dynamic_templates\": [\r\n {\r\n \"dates\": {\r\n \"match_mapping_type\": [\"*time\", \"date\", \"TIME\"],\r\n \"match_pattern\": \"regex\",\r\n \"mapping\": {\r\n \"type\": \"date\",\r\n \"format\": \"YYYY-MM-dd HH:mm:ss | YYYY-MM-dd | YYYY-MM-dd HH:mm:ss.SS | YYYY:MM-dd HH:mm:ss.SSSS\"\r\n }\r\n }\r\n }\r\n ]\r\n", "created_at": "2017-01-05T18:01:48Z" } ], "number": 9410, "title": "dynamic_date_formats ignored if dynamic_templates for date is present" }
{ "body": "Unless the dynamic templates define an explicit format in the mapping\r\ndefinition: in that case the explicit mapping should have precedence.\r\n\r\nCloses #9410", "number": 22174, "review_comments": [], "title": "Dynamic `date` fields should use the `format` that was used to detect it is a date." }
{ "commits": [ { "message": "Dynamic `date` fields should use the `format` that was used to detect it is a date.\n\nUnless the dynamic templates define an explicit format in the mapping\ndefinition: in that case the explicit mapping should have precedence.\n\nCloses #9410" } ], "files": [ { "diff": "@@ -70,6 +70,7 @@ public static class Builder extends FieldMapper.Builder<Builder, DateFieldMapper\n \n private Boolean ignoreMalformed;\n private Locale locale;\n+ private boolean dateTimeFormatterSet = false;\n \n public Builder(String name) {\n super(name, new DateFieldType(), new DateFieldType());\n@@ -97,8 +98,14 @@ protected Explicit<Boolean> ignoreMalformed(BuilderContext context) {\n return Defaults.IGNORE_MALFORMED;\n }\n \n+ /** Whether an explicit format for this date field has been set already. */\n+ public boolean isDateTimeFormatterSet() {\n+ return dateTimeFormatterSet;\n+ }\n+\n public Builder dateTimeFormatter(FormatDateTimeFormatter dateTimeFormatter) {\n fieldType().setDateTimeFormatter(dateTimeFormatter);\n+ dateTimeFormatterSet = true;\n return this;\n }\n ", "filename": "core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java", "status": "modified" }, { "diff": "@@ -694,6 +694,12 @@ private static Mapper.Builder<?,?> createBuilderFromDynamicValue(final ParseCont\n if (builder == null) {\n builder = newDateBuilder(currentFieldName, dateTimeFormatter, Version.indexCreated(context.indexSettings()));\n }\n+ if (builder instanceof DateFieldMapper.Builder) {\n+ DateFieldMapper.Builder dateBuilder = (DateFieldMapper.Builder) builder;\n+ if (dateBuilder.isDateTimeFormatterSet() == false) {\n+ dateBuilder.dateTimeFormatter(dateTimeFormatter);\n+ }\n+ }\n return builder;\n }\n }", "filename": "core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java", "status": "modified" }, { "diff": "@@ -644,6 +644,59 @@ public void testNumericDetectionDefault() throws Exception {\n assertThat(mapper, instanceOf(TextFieldMapper.class));\n }\n \n+ public void testDateDetectionInheritsFormat() throws Exception {\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startArray(\"dynamic_date_formats\")\n+ .value(\"yyyy-MM-dd\")\n+ .endArray()\n+ .startArray(\"dynamic_templates\")\n+ .startObject()\n+ .startObject(\"dates\")\n+ .field(\"match_mapping_type\", \"date\")\n+ .field(\"match\", \"*2\")\n+ .startObject(\"mapping\")\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .startObject()\n+ .startObject(\"dates\")\n+ .field(\"match_mapping_type\", \"date\")\n+ .field(\"match\", \"*3\")\n+ .startObject(\"mapping\")\n+ .field(\"format\", \"yyyy-MM-dd||epoch_millis\")\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .endArray()\n+ .endObject().endObject().string();\n+\n+ IndexService index = createIndex(\"test\");\n+ client().admin().indices().preparePutMapping(\"test\").setType(\"type\").setSource(mapping).get();\n+ DocumentMapper defaultMapper = index.mapperService().documentMapper(\"type\");\n+\n+ ParsedDocument doc = defaultMapper.parse(\"test\", \"type\", \"1\", XContentFactory.jsonBuilder()\n+ .startObject()\n+ .field(\"date1\", \"2016-11-20\")\n+ .field(\"date2\", \"2016-11-20\")\n+ .field(\"date3\", \"2016-11-20\")\n+ .endObject()\n+ .bytes());\n+ assertNotNull(doc.dynamicMappingsUpdate());\n+ assertAcked(client().admin().indices().preparePutMapping(\"test\").setType(\"type\").setSource(doc.dynamicMappingsUpdate().toString()).get());\n+\n+ defaultMapper = index.mapperService().documentMapper(\"type\");\n+\n+ DateFieldMapper 
dateMapper1 = (DateFieldMapper) defaultMapper.mappers().smartNameFieldMapper(\"date1\");\n+ DateFieldMapper dateMapper2 = (DateFieldMapper) defaultMapper.mappers().smartNameFieldMapper(\"date2\");\n+ DateFieldMapper dateMapper3 = (DateFieldMapper) defaultMapper.mappers().smartNameFieldMapper(\"date3\");\n+ // inherited from dynamic date format\n+ assertEquals(\"yyyy-MM-dd\", dateMapper1.fieldType().dateTimeFormatter().format());\n+ // inherited from dynamic date format since the mapping in the template did not specify a format\n+ assertEquals(\"yyyy-MM-dd\", dateMapper2.fieldType().dateTimeFormatter().format());\n+ // not inherited from the dynamic date format since the template defined an explicit format\n+ assertEquals(\"yyyy-MM-dd||epoch_millis\", dateMapper3.fieldType().dateTimeFormatter().format());\n+ }\n+\n public void testDynamicTemplateOrder() throws IOException {\n // https://github.com/elastic/elasticsearch/issues/18625\n // elasticsearch used to apply templates that do not have a match_mapping_type first", "filename": "core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java", "status": "modified" } ] }
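The effect of the change above: a dynamically mapped date field now inherits the `dynamic_date_formats` entry that detected it, unless the matching dynamic template specifies its own `format`, which then takes precedence. A minimal mapping sketch in the same `XContentBuilder` style as the added test; the template names and field patterns are invented for illustration.

```java
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class DynamicDateFormatSketch {
    public static XContentBuilder mapping() throws Exception {
        // Fields matched by "dates_with_default" inherit the detecting format ("yyyy-MM-dd");
        // fields matched by "dates_with_format" keep the template's explicit format instead.
        return XContentFactory.jsonBuilder().startObject().startObject("type")
            .startArray("dynamic_date_formats").value("yyyy-MM-dd").endArray()
            .startArray("dynamic_templates")
                .startObject().startObject("dates_with_default")
                    .field("match_mapping_type", "date")
                    .field("match", "*_created")
                    .startObject("mapping").endObject()
                .endObject().endObject()
                .startObject().startObject("dates_with_format")
                    .field("match_mapping_type", "date")
                    .field("match", "*_updated")
                    .startObject("mapping").field("format", "yyyy-MM-dd||epoch_millis").endObject()
                .endObject().endObject()
            .endArray()
        .endObject().endObject();
    }
}
```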
{ "body": "**Elasticsearch version**: 5.1.1\r\n\r\n**JVM version**: 1.8.0_111\r\n\r\n**OS version**: CentOS 6.8\r\n\r\n**Description of the problem including expected versus actual behavior**:\r\nWhen there are 2 documents, 1 with value 0.0 and the other with value -0.0, ES 5.1.1 no longer returns the documents when a range query is execute with either gt -0.0 or lt 0.0. Where as ES 2.4.3 would return the document with value 0.0 for gt -0.0 and return the document with value -0.0 for lt 0.0.\r\n\r\nlte and gte do work correctly, lte 0.0 and gte -0.0 return both documents\r\n\r\n**Steps to reproduce**:\r\n1. `curl -XPUT localhost:9200/double-test?pretty -d '{ \"mappings\": { \"double-values\": { \"properties\": { \"number\": { \"type\": \"double\" }}}}}'`\r\n```json\r\n{\r\n \"acknowledged\" : true,\r\n \"shards_acknowledged\" : true\r\n}\r\n```\r\n2. `curl -XPOST localhost:9200/double-testing/double-values?pretty -d '{ \"number\": 0.0 }'`\r\n```json\r\n{\r\n \"_index\" : \"double-testing\",\r\n \"_type\" : \"double-values\",\r\n \"_id\" : \"AVj9f_B77DB0gWTRwMi9\",\r\n \"_version\" : 1,\r\n \"result\" : \"created\",\r\n \"_shards\" : {\r\n \"total\" : 2,\r\n \"successful\" : 1,\r\n \"failed\" : 0\r\n },\r\n \"created\" : true\r\n}\r\n```\r\n3. `curl -XPOST localhost:9200/double-testing/double-values?pretty -d '{ \"number\": -0.0 }'`\r\n```json \r\n{\r\n \"_index\" : \"double-testing\",\r\n \"_type\" : \"double-values\",\r\n \"_id\" : \"AVj9f_087DB0gWTRwMi-\",\r\n \"_version\" : 1,\r\n \"result\" : \"created\",\r\n \"_shards\" : {\r\n \"total\" : 2,\r\n \"successful\" : 1,\r\n \"failed\" : 0\r\n },\r\n \"created\" : true\r\n}\r\n```\r\n4. `curl localhost:9200/double-testing/double-values/_search?pretty`\r\n```json\r\n{\r\n \"took\" : 2,\r\n \"timed_out\" : false,\r\n \"_shards\" : {\r\n \"total\" : 5,\r\n \"successful\" : 5,\r\n \"failed\" : 0\r\n },\r\n \"hits\" : {\r\n \"total\" : 2,\r\n \"max_score\" : 1.0,\r\n \"hits\" : [\r\n {\r\n \"_index\" : \"double-testing\",\r\n \"_type\" : \"double-values\",\r\n \"_id\" : \"AVj9f_B77DB0gWTRwMi9\",\r\n \"_score\" : 1.0,\r\n \"_source\" : {\r\n \"number\" : -0.0\r\n }\r\n },\r\n {\r\n \"_index\" : \"double-testing\",\r\n \"_type\" : \"double-values\",\r\n \"_id\" : \"AVj9f_087DB0gWTRwMi-\",\r\n \"_score\" : 1.0,\r\n \"_source\" : {\r\n \"number\" : 0.0\r\n }\r\n }\r\n ]\r\n }\r\n}\r\n```\r\n5. `curl localhost:9200/double-testing/double-values/_search?pretty -d '{ \"query\": { \"range\": { \"number\": { \"lt\": 0.0 }}}}'`\r\n```json\r\n{\r\n \"took\" : 2,\r\n \"timed_out\" : false,\r\n \"_shards\" : {\r\n \"total\" : 5,\r\n \"successful\" : 5,\r\n \"failed\" : 0\r\n },\r\n \"hits\" : {\r\n \"total\" : 0,\r\n \"max_score\" : null,\r\n \"hits\" : [ ]\r\n }\r\n}\r\n```\r\n6. `curl localhost:9200/double-testing/double-values/_search?pretty -d '{ \"query\": { \"range\": { \"number\": { \"lte\": 0.0 }}}}'`\r\n```json\r\n{\r\n \"took\" : 1,\r\n \"timed_out\" : false,\r\n \"_shards\" : {\r\n \"total\" : 5,\r\n \"successful\" : 5,\r\n \"failed\" : 0\r\n },\r\n \"hits\" : {\r\n \"total\" : 2,\r\n \"max_score\" : 1.0,\r\n \"hits\" : [\r\n {\r\n \"_index\" : \"double-testing\",\r\n \"_type\" : \"double-values\",\r\n \"_id\" : \"AVj9f_B77DB0gWTRwMi9\",\r\n \"_score\" : 1.0,\r\n \"_source\" : {\r\n \"number\" : -0.0\r\n }\r\n },\r\n {\r\n \"_index\" : \"double-testing\",\r\n \"_type\" : \"double-values\",\r\n \"_id\" : \"AVj9f_087DB0gWTRwMi-\",\r\n \"_score\" : 1.0,\r\n \"_source\" : {\r\n \"number\" : 0.0\r\n }\r\n }\r\n ]\r\n }\r\n}\r\n```\r\n7. 
`curl localhost:9200/double-testing/double-values/_search?pretty -d '{ \"query\": { \"range\": { \"number\": { \"gt\": -0.0 }}}}'`\r\n```json\r\n{\r\n \"took\" : 1,\r\n \"timed_out\" : false,\r\n \"_shards\" : {\r\n \"total\" : 5,\r\n \"successful\" : 5,\r\n \"failed\" : 0\r\n },\r\n \"hits\" : {\r\n \"total\" : 0,\r\n \"max_score\" : null,\r\n \"hits\" : [ ]\r\n }\r\n}\r\n```\r\n8. `curl localhost:9200/double-testing/double-values/_search?pretty -d '{ \"query\": { \"range\": { \"number\": { \"gte\": -0.0 }}}}'`\r\n```json\r\n{\r\n \"took\" : 2,\r\n \"timed_out\" : false,\r\n \"_shards\" : {\r\n \"total\" : 5,\r\n \"successful\" : 5,\r\n \"failed\" : 0\r\n },\r\n \"hits\" : {\r\n \"total\" : 2,\r\n \"max_score\" : 1.0,\r\n \"hits\" : [\r\n {\r\n \"_index\" : \"double-testing\",\r\n \"_type\" : \"double-values\",\r\n \"_id\" : \"AVj9f_B77DB0gWTRwMi9\",\r\n \"_score\" : 1.0,\r\n \"_source\" : {\r\n \"number\" : -0.0\r\n }\r\n },\r\n {\r\n \"_index\" : \"double-testing\",\r\n \"_type\" : \"double-values\",\r\n \"_id\" : \"AVj9f_087DB0gWTRwMi-\",\r\n \"_score\" : 1.0,\r\n \"_source\" : {\r\n \"number\" : 0.0\r\n }\r\n }\r\n ]\r\n }\r\n}\r\n```", "comments": [ { "body": "I opened an issue on the Lucene side: https://issues.apache.org/jira/browse/LUCENE-7594", "created_at": "2016-12-15T17:51:38Z" } ], "number": 22167, "title": "Handling of 0.0 and -0.0 in range (lt and gt) queries has changed from 2.4.3 > 5.1.1" }
{ "body": "Our `float`/`double` fields generally assume that `-0` compares less than `+0`,\r\nexcept when bounds are exclusive: an exclusive lower bound on `-0` excludes\r\n`+0` and an exclusive upper bound on `+0` excludes `-0`.\r\n\r\nCloses #22167", "number": 22173, "review_comments": [ { "body": "You should also test `nextUp(-0f)` ?:\r\n````\r\nassertEquals(\r\n NumberFieldMapper.NumberType.DOUBLE.rangeQuery(\"field\", -0f, null, false, false),\r\n NumberFieldMapper.NumberType.DOUBLE.rangeQuery(\"field\", +0f, null, true, false));\r\n````", "created_at": "2016-12-19T17:41:41Z" } ], "title": "Make `-0` compare less than `+0` consistently." }
{ "commits": [ { "message": "Make `-0` compare less than `+0` consistently.\n\nOur `float`/`double` fields generally assume that `-0` compares less than `+0`,\nexcept when bounds are exclusive: an exclusive lower bound on `-0` excludes\n`+0` and an exclusive upper bound on `+0` excludes `-0`.\n\nCloses #22167" }, { "message": "iter" } ], "files": [ { "diff": "@@ -186,6 +186,30 @@ Query termsQuery(String field, List<Object> values) {\n return HalfFloatPoint.newSetQuery(field, v);\n }\n \n+ private float nextDown(float f) {\n+ // HalfFloatPoint.nextDown considers that -0 is the same as +0\n+ // while point ranges are consistent with Float.compare, so\n+ // they consider that -0 < +0, so we explicitly make sure\n+ // that nextDown(+0) returns -0\n+ if (Float.floatToIntBits(f) == Float.floatToIntBits(0f)) {\n+ return -0f;\n+ } else {\n+ return HalfFloatPoint.nextDown(f);\n+ }\n+ }\n+\n+ private float nextUp(float f) {\n+ // HalfFloatPoint.nextUp considers that -0 is the same as +0\n+ // while point ranges are consistent with Float.compare, so\n+ // they consider that -0 < +0, so we explicitly make sure\n+ // that nextUp(-0) returns +0\n+ if (Float.floatToIntBits(f) == Float.floatToIntBits(-0f)) {\n+ return +0f;\n+ } else {\n+ return HalfFloatPoint.nextUp(f);\n+ }\n+ }\n+\n @Override\n Query rangeQuery(String field, Object lowerTerm, Object upperTerm,\n boolean includeLower, boolean includeUpper) {\n@@ -194,16 +218,16 @@ Query rangeQuery(String field, Object lowerTerm, Object upperTerm,\n if (lowerTerm != null) {\n l = parse(lowerTerm);\n if (includeLower) {\n- l = Math.nextDown(l);\n+ l = nextDown(l);\n }\n l = HalfFloatPoint.nextUp(l);\n }\n if (upperTerm != null) {\n u = parse(upperTerm);\n if (includeUpper) {\n- u = Math.nextUp(u);\n+ u = nextUp(u);\n }\n- u = HalfFloatPoint.nextDown(u);\n+ u = nextDown(u);\n }\n return HalfFloatPoint.newRangeQuery(field, l, u);\n }\n@@ -276,6 +300,30 @@ Query termsQuery(String field, List<Object> values) {\n return FloatPoint.newSetQuery(field, v);\n }\n \n+ private float nextDown(float f) {\n+ // Math.nextDown considers that -0 is the same as +0\n+ // while point ranges are consistent with Float.compare, so\n+ // they consider that -0 < +0, so we explicitly make sure\n+ // that nextDown(+0) returns -0\n+ if (Float.floatToIntBits(f) == Float.floatToIntBits(0f)) {\n+ return -0f;\n+ } else {\n+ return Math.nextDown(f);\n+ }\n+ }\n+\n+ private float nextUp(float f) {\n+ // Math.nextUp considers that -0 is the same as +0\n+ // while point ranges are consistent with Float.compare, so\n+ // they consider that -0 < +0, so we explicitly make sure\n+ // that nextUp(-0) returns +0\n+ if (Float.floatToIntBits(f) == Float.floatToIntBits(-0f)) {\n+ return +0f;\n+ } else {\n+ return Math.nextUp(f);\n+ }\n+ }\n+\n @Override\n Query rangeQuery(String field, Object lowerTerm, Object upperTerm,\n boolean includeLower, boolean includeUpper) {\n@@ -284,13 +332,13 @@ Query rangeQuery(String field, Object lowerTerm, Object upperTerm,\n if (lowerTerm != null) {\n l = parse(lowerTerm);\n if (includeLower == false) {\n- l = Math.nextUp(l);\n+ l = nextUp(l);\n }\n }\n if (upperTerm != null) {\n u = parse(upperTerm);\n if (includeUpper == false) {\n- u = Math.nextDown(u);\n+ u = nextDown(u);\n }\n }\n return FloatPoint.newRangeQuery(field, l, u);\n@@ -364,6 +412,30 @@ Query termsQuery(String field, List<Object> values) {\n return DoublePoint.newSetQuery(field, v);\n }\n \n+ private double nextDown(double d) {\n+ // Math.nextDown considers that -0 is the same as +0\n+ // while point 
ranges are consistent with Double.compare, so\n+ // they consider that -0 < +0, so we explicitly make sure\n+ // that nextDown(+0) returns -0\n+ if (Double.doubleToLongBits(d) == Double.doubleToLongBits(0d)) {\n+ return -0d;\n+ } else {\n+ return Math.nextDown(d);\n+ }\n+ }\n+\n+ private double nextUp(double d) {\n+ // Math.nextUp considers that -0 is the same as +0\n+ // while point ranges are consistent with Double.compare, so\n+ // they consider that -0 < +0, so we explicitly make sure\n+ // that nextUp(-0) returns +0\n+ if (Double.doubleToLongBits(d) == Double.doubleToLongBits(-0d)) {\n+ return +0d;\n+ } else {\n+ return Math.nextUp(d);\n+ }\n+ }\n+\n @Override\n Query rangeQuery(String field, Object lowerTerm, Object upperTerm,\n boolean includeLower, boolean includeUpper) {\n@@ -372,13 +444,13 @@ Query rangeQuery(String field, Object lowerTerm, Object upperTerm,\n if (lowerTerm != null) {\n l = parse(lowerTerm);\n if (includeLower == false) {\n- l = Math.nextUp(l);\n+ l = nextUp(l);\n }\n }\n if (upperTerm != null) {\n u = parse(upperTerm);\n if (includeUpper == false) {\n- u = Math.nextDown(u);\n+ u = nextDown(u);\n }\n }\n return DoublePoint.newRangeQuery(field, l, u);", "filename": "core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java", "status": "modified" }, { "diff": "@@ -149,4 +149,20 @@ public void testHalfFloatRange() throws IOException {\n }\n IOUtils.close(reader, dir);\n }\n+\n+ public void testNegativeZero() {\n+ assertEquals(\n+ NumberType.DOUBLE.rangeQuery(\"field\", null, -0d, true, true),\n+ NumberType.DOUBLE.rangeQuery(\"field\", null, +0d, true, false));\n+ assertEquals(\n+ NumberType.FLOAT.rangeQuery(\"field\", null, -0f, true, true),\n+ NumberType.FLOAT.rangeQuery(\"field\", null, +0f, true, false));\n+ assertEquals(\n+ NumberType.HALF_FLOAT.rangeQuery(\"field\", null, -0f, true, true),\n+ NumberType.HALF_FLOAT.rangeQuery(\"field\", null, +0f, true, false));\n+\n+ assertFalse(NumberType.DOUBLE.termQuery(\"field\", -0d).equals(NumberType.DOUBLE.termQuery(\"field\", +0d)));\n+ assertFalse(NumberType.FLOAT.termQuery(\"field\", -0f).equals(NumberType.FLOAT.termQuery(\"field\", +0f)));\n+ assertFalse(NumberType.HALF_FLOAT.termQuery(\"field\", -0f).equals(NumberType.HALF_FLOAT.termQuery(\"field\", +0f)));\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java", "status": "modified" }, { "diff": "@@ -39,6 +39,12 @@ PUT my_index\n --------------------------------------------------\n // CONSOLE\n \n+NOTE: The `double`, `float` and `half_float` types consider that `-0.0` and\n+`+0.0` are different values. As a consequence, doing a `term` query on\n+`-0.0` will not match `+0.0` and vice-versa. Same is true for range queries:\n+if the upper bound is `-0.0` then `+0.0` will not match, and if the lower\n+bound is `+0.0` then `-0.0` will not match.\n+\n ==== Which type should I use?\n \n As far as integer types (`byte`, `short`, `integer` and `long`) are concerned,", "filename": "docs/reference/mapping/types/numeric.asciidoc", "status": "modified" } ] }
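The special-casing above is needed because IEEE-754 equality treats `-0` and `+0` as the same value, while `Float.compare` and point ranges order `-0` strictly before `+0`, and `Math.nextUp(-0f)` jumps straight to `Float.MIN_VALUE`, skipping `+0`. A small plain-JDK demo of those three behaviours (no Elasticsearch dependency; the class name is made up).

```java
public class NegativeZeroDemo {
    public static void main(String[] args) {
        // IEEE-754 arithmetic equality: -0 and +0 are the same value.
        System.out.println(-0f == 0f);                                              // true

        // Total ordering used by points/doc values: -0 sorts strictly before +0.
        System.out.println(Float.compare(-0f, 0f) < 0);                             // true
        System.out.println(Float.floatToIntBits(-0f) == Float.floatToIntBits(0f));  // false

        // Math.nextUp treats both zeros alike and returns the smallest positive float,
        // skipping +0 entirely -- hence the explicit nextUp/nextDown wrappers in the fix.
        System.out.println(Math.nextUp(-0f) == Float.MIN_VALUE);                    // true
    }
}
```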
{ "body": "**Elasticsearch version**: 5.0\r\n\r\n**Plugins installed**: []\r\n\r\n**JVM version**:\r\n\r\n**OS version**: OS X 10.11.6\r\n\r\n**Description of the problem including expected versus actual behavior**:\r\n\r\nI'm dealing with a schema that relates, companies to branches to employees,\r\n\r\nso I'm using a parent -> child -> grand-child approach with the following mapping: \r\n\r\n\r\n**Steps to reproduce**:\r\n 1. insert mapping\r\n\r\n```\r\nPUT 127.0.0.1:9200/elastic_issue\r\n\r\n{\r\n\t\"mappings\": {\r\n\t\t\"company\": {\r\n\t\t\t\"properties\": {\r\n\t\t\t\t\"name\": {\r\n\t\t\t\t\t\"type\": \"keyword\"\r\n\t\t\t\t},\r\n\t\t\t\t\"company_id\": {\r\n\t\t\t\t\t\"type\": \"long\"\r\n\t\t\t\t},\r\n\t\t\t\t\"state\": {\r\n\t\t\t\t\t\"type\": \"keyword\"\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t},\r\n\t\t\"branch\": {\r\n\t\t\t\"_parent\": {\r\n\t\t\t\t\"type\": \"company\"\r\n\t\t\t},\r\n\t\t\t\"properties\": {\r\n\t\t\t\t\"opening_date\": {\r\n\t\t\t\t\t\"type\": \"date\",\r\n\t\t\t\t\t\"format\": \"epoch_millis\"\r\n\t\t\t\t},\r\n \"opening_date_long\": {\r\n\t\t\t\t\t\"type\": \"long\"\r\n\t\t\t\t}\r\n\t\t\t\t\"name\": {\r\n\t\t\t\t\t\"type\": \"keyword\"\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t},\r\n\t\t\"employee\": {\r\n\t\t\t\"_parent\": {\r\n\t\t\t\t\"type\": \"branch\"\r\n\t\t\t},\r\n\t\t\t\"properties\": {\r\n\t\t\t\t\"name\": {\r\n\t\t\t\t\t\"type\": \"keyword\"\r\n\t\t\t\t},\r\n\t\t\t\t\"salary\": {\r\n\t\t\t\t\t\"type\": \"long\"\r\n\t\t\t\t},\r\n\t\t\t\t\"bonus\": {\r\n\t\t\t\t\t\"type\": \"long\"\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n}\r\n```\r\n \r\n2. insert data for company, branch and employee\r\n\r\n```\r\n127.0.0.1:9200/elastic_issue/company/1\r\n\r\n{\r\n\t\"name\": \"blue sky\",\r\n\t\"company_id\": 1,\r\n\t\"state\": \"ON\"\r\n}\r\n\r\n127.0.0.1:9200/elastic_issue/branch/11?parent=1&routing=1\r\n\r\n{\r\n\t\"opening_date\": 1481624898000,\r\n\t\"opening_date_long\": 1481624898000,\r\n\t\"name\": \"zalka\"\r\n}\r\n\r\n127.0.0.1:9200/elastic_issue/employee/xyz?parent=11&routing=1\r\n\r\n{ \r\n\t\"name\": \"rick\",\r\n\t\"salary\": 1500,\r\n\t\"bonus\": 300\r\n}\r\n```\r\n\r\nMY requirements heavily depend on scoring on each level.\r\n\r\nwhat I'm expecting to achieve is the following: \r\n\r\nemployees that have a salary of a certain range, will take a specific weight value,\r\nsame for their bonus and the company they work for, etc..\r\n\r\nthe forumla is:\r\n\r\nscore = salary weight + bonus weight + company weight + state wight\r\n\r\n3. 
query\r\n\r\n```\r\n\r\n{\r\n\t\"query\": {\r\n\t\t\"function_score\": {\r\n\t\t\t\"score_mode\": \"sum\",\r\n\t\t\t\"boost_mode\": \"sum\",\r\n\t\t\t\"query\": {\r\n\t\t\t\t\"bool\": {\r\n\t\t\t\t\t\"must\": {\r\n\t\t\t\t\t\t\"has_child\": {\r\n\t\t\t\t\t\t\t\"type\": \"branch\",\r\n\t\t\t\t\t\t\t\"score_mode\": \"max\",\r\n\t\t\t\t\t\t\t\"inner_hits\": {\r\n\t\t\t\t\t\t\t \"size\": 1\r\n\t\t\t\t\t\t\t},\r\n\t\t\t\t\t\t\t\"query\": {\r\n\t\t\t\t\t\t\t\t\"function_score\": {\r\n\t\t\t\t\t\t\t\t\t\"query\": {\r\n\t\t\t\t\t\t\t\t\t\t\"bool\": {\r\n\t\t\t\t\t\t\t\t\t\t\t\"must\": {\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"has_child\": {\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"type\": \"employee\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"score_mode\": \"max\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"inner_hits\": {\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"size\": 1\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t},\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"query\": {\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"function_score\": {\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"score_mode\": \"sum\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"boost_mode\": \"sum\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"functions\": [\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t{\"filter\":{\"range\":{\"salary\":{\"gte\":0,\"lt\":1000}}},\"weight\":1},\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t{\"filter\":{\"range\":{\"salary\":{\"gte\":1000,\"lt\":2000}}},\"weight\":2},\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t{\"filter\":{\"range\":{\"bonus\":{\"gte\":0,\"lt\":33}}},\"weight\":2},\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t{\"filter\":{\"range\":{\"bonus\":{\"gte\":33,\"lt\":66}}},\"weight\":3},\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t{\"filter\":{\"range\":{\"bonus\":{\"gte\":66,\"lt\":100}}},\"weight\":1}\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\t\t\t\t},\r\n\t\t\t\t\t\t\t\t\t\t\t\"filter\": [\r\n\t\t\t\t\t\t\t\t\t\t\t {\r\n\t\t\t\t\t\t\t\t\t\t\t \"range\": {\r\n\t\t\t\t\t\t\t\t\t\t\t \"opening_date\": {\r\n\t\t\t\t\t\t\t\t\t\t\t \"gte\": 1481624898000\r\n\t\t\t\t\t\t\t\t\t\t\t }\r\n\t\t\t\t\t\t\t\t\t\t\t }\r\n\t\t\t\t\t\t\t\t\t\t\t }\r\n\t\t\t\t\t\t\t\t\t\t\t ]\r\n\t\t\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t},\r\n\t\t\t\"functions\": [\r\n\t\t\t\t{\r\n\t\t\t\t\t\"filter\": {\r\n\t\t\t\t\t\t\"terms\": {\r\n\t\t\t\t\t\t\t\"company_id\": [1]\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t},\r\n\t\t\t\t\t\"weight\": 2\r\n\t\t\t\t},\r\n\t\t\t\t{\r\n\t\t\t\t\t\"filter\": {\r\n\t\t\t\t\t\t\"terms\": {\r\n\t\t\t\t\t\t\t\"state\": [\"ON\"]\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t},\r\n\t\t\t\t\t\"weight\": 1\r\n\t\t\t\t}\r\n\t\t\t]\r\n\t\t}\r\n\t},\r\n\t\"track_scores\": true\r\n}\r\n```\r\n\r\nat this point the employees scores are being added and passed to the parent - branch,\r\n\r\nbut when it reaches the company level the scoring breaks and the child score (branch) gets **multiplied** by the sum of the company score instead of being **summed up**\r\n\r\nthe reason behind this is the **date range filter** that is on the branch level \r\n```\r\n{\r\n\t\"filter\": [{\r\n\t\t\"range\": {\r\n\t\t\t\"opening_date\": {\r\n\t\t\t\t\"gte\": 1481624898000\r\n\t\t\t}\r\n\t\t}\r\n\t}]\r\n}\r\n```\r\n\r\nwhen mapping this attribute to **date** it breaks the parent score function.\r\n\r\nhowever if replaced by mapping **long** mapping it works all fine,\r\n\r\nso you can try:\r\n\r\n```\r\n{\r\n\t\"filter\": [{\r\n\t\t\"range\": {\r\n\t\t\t\"opening_date_long\": 
{\r\n\t\t\t\t\"gte\": 1481624898000\r\n\t\t\t}\r\n\t\t}\r\n\t}]\r\n}\r\n```\r\n\r\nthis works fine.\r\n\r\nfor me it seems that the date range filter is breaking the function_score \"boost_mode\": \"sum\" and forcing the child score to get **multiplied** by its parent opposed to a custom mode, \"sum\", \"avg\", etc...\r\n\r\nI tried reproducing this on another machine and it gave the same results\r\n\r\nusing the explain API I realised: \r\n\r\n```\r\n127.0.0.1:9200/elastic_issue/_search?explain=true\r\n\r\n{\r\n\t\"_shard\": \"[elastic_issue][3]\",\r\n\t\"_node\": \"5rSiUGoaRQi3agxKCMJ31g\",\r\n\t\"_index\": \"elastic_issue\",\r\n\t\"_type\": \"company\",\r\n\t\"_id\": \"1\",\r\n\t\"_score\": 9,\r\n\t\"_source\": {\r\n\t\t\"name\": \"blue sky\",\r\n\t\t\"company_id\": 1,\r\n\t\t\"state\": \"ON\"\r\n\t},\r\n\t\"_explanation\": {\r\n\t\t\"value\": 9,\r\n\t\t\"description\": \"function score, product of:\",\r\n\t\t\"details\": [{\r\n\t\t\t\"value\": 3,\r\n\t\t\t\"description\": \"A match, join value 1\",\r\n\t\t\t\"details\": []\r\n\t\t}, {\r\n\t\t\t\"value\": 3,\r\n\t\t\t\"description\": \"min of:\",\r\n\t\t\t\"details\": [{\r\n\t\t\t\t\"value\": 3,\r\n\t\t\t\t\"description\": \"function score, score mode [sum]\",\r\n\t\t\t\t\"details\": [{\r\n\t\t\t\t\t\"value\": 2,\r\n\t\t\t\t\t\"description\": \"function score, product of:\",\r\n\t\t\t\t\t\"details\": [{\r\n\t\t\t\t\t\t\"value\": 1,\r\n\t\t\t\t\t\t\"description\": \"match filter: company_id:[1 TO 1]\",\r\n\t\t\t\t\t\t\"details\": []\r\n\t\t\t\t\t}, {\r\n\t\t\t\t\t\t\"value\": 2,\r\n\t\t\t\t\t\t\"description\": \"product of:\",\r\n\t\t\t\t\t\t\"details\": [{\r\n\t\t\t\t\t\t\t\"value\": 1,\r\n\t\t\t\t\t\t\t\"description\": \"constant score 1.0 - no function provided\",\r\n\t\t\t\t\t\t\t\"details\": []\r\n\t\t\t\t\t\t}, {\r\n\t\t\t\t\t\t\t\"value\": 2,\r\n\t\t\t\t\t\t\t\"description\": \"weight\",\r\n\t\t\t\t\t\t\t\"details\": []\r\n\t\t\t\t\t\t}]\r\n\t\t\t\t\t}]\r\n\t\t\t\t}, {\r\n\t\t\t\t\t\"value\": 1,\r\n\t\t\t\t\t\"description\": \"function score, product of:\",\r\n\t\t\t\t\t\"details\": [{\r\n\t\t\t\t\t\t\"value\": 1,\r\n\t\t\t\t\t\t\"description\": \"match filter: state:ON\",\r\n\t\t\t\t\t\t\"details\": []\r\n\t\t\t\t\t}, {\r\n\t\t\t\t\t\t\"value\": 1,\r\n\t\t\t\t\t\t\"description\": \"product of:\",\r\n\t\t\t\t\t\t\"details\": [{\r\n\t\t\t\t\t\t\t\"value\": 1,\r\n\t\t\t\t\t\t\t\"description\": \"constant score 1.0 - no function provided\",\r\n\t\t\t\t\t\t\t\"details\": []\r\n\t\t\t\t\t\t}, {\r\n\t\t\t\t\t\t\t\"value\": 1,\r\n\t\t\t\t\t\t\t\"description\": \"weight\",\r\n\t\t\t\t\t\t\t\"details\": []\r\n\t\t\t\t\t\t}]\r\n\t\t\t\t\t}]\r\n\t\t\t\t}]\r\n\t\t\t}, {\r\n\t\t\t\t\"value\": 3.4028235e+38,\r\n\t\t\t\t\"description\": \"maxBoost\",\r\n\t\t\t\t\"details\": []\r\n\t\t\t}]\r\n\t\t}]\r\n\t},\r\n\t\"inner_hits\": {\r\n\t\t\"branch\": {\r\n\t\t\t\"hits\": {\r\n\t\t\t\t\"total\": 1,\r\n\t\t\t\t\"max_score\": 3,\r\n\t\t\t\t\"hits\": [{\r\n\t\t\t\t\t\"_type\": \"branch\",\r\n\t\t\t\t\t\"_id\": \"11\",\r\n\t\t\t\t\t\"_score\": 3,\r\n\t\t\t\t\t\"_routing\": \"1\",\r\n\t\t\t\t\t\"_parent\": \"1\",\r\n\t\t\t\t\t\"_source\": {\r\n\t\t\t\t\t\t\"opening_date\": 1481624898000,\r\n\t\t\t\t\t\t\"opening_date_long\": 1481624898000,\r\n\t\t\t\t\t\t\"name\": \"zalka\"\r\n\t\t\t\t\t},\r\n\t\t\t\t\t\"inner_hits\": {\r\n\t\t\t\t\t\t\"employee\": {\r\n\t\t\t\t\t\t\t\"hits\": {\r\n\t\t\t\t\t\t\t\t\"total\": 1,\r\n\t\t\t\t\t\t\t\t\"max_score\": 3,\r\n\t\t\t\t\t\t\t\t\"hits\": [{\r\n\t\t\t\t\t\t\t\t\t\"_type\": \"employee\",\r\n\t\t\t\t\t\t\t\t\t\"_id\": 
\"xyz\",\r\n\t\t\t\t\t\t\t\t\t\"_score\": 3,\r\n\t\t\t\t\t\t\t\t\t\"_routing\": \"1\",\r\n\t\t\t\t\t\t\t\t\t\"_parent\": \"11\",\r\n\t\t\t\t\t\t\t\t\t\"_source\": {\r\n\t\t\t\t\t\t\t\t\t\t\"name\": \"rick\",\r\n\t\t\t\t\t\t\t\t\t\t\"salary\": 1500,\r\n\t\t\t\t\t\t\t\t\t\t\"bonus\": 300\r\n\t\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\t}]\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\t\t\t\t}]\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n}\r\n``` \r\n\r\nin the _explanation: \r\n\r\nthe first description is being: \r\n\r\n\"description\": \"function score, product of:\"\r\n\r\nmeaning its a multiplication factor although I'm forcing \"boost_mode\": \"sum\"\r\n\r\nwhereas changing the filter to \"opening_date_long\" *long* instead of *date*\r\n\r\nthe first description will change to: \r\n\r\n\"description\": \"sum of\",\r\n\r\nworks fine again.\r\n\r\nnasty!\r\n\r\n", "comments": [ { "body": "I've reduced this to the following:\r\n\r\n```\r\nPUT elastic_issue\r\n{\r\n \"mappings\": {\r\n \"branch\": {\r\n \"properties\": {\r\n \"opening_date\": {\r\n \"type\": \"date\",\r\n \"format\": \"epoch_millis\"\r\n },\r\n \"opening_date_long\": {\r\n \"type\": \"long\"\r\n }\r\n }\r\n }\r\n }\r\n}\r\n\r\nPUT elastic_issue/branch/11\r\n{\r\n\t\"opening_date\": 1481624898000,\r\n\t\"opening_date_long\": 1481624898000,\r\n\t\"name\": \"zalka\"\r\n}\r\n\r\nGET elastic_issue/_search?explain\r\n{\r\n \"query\": {\r\n \"function_score\": {\r\n \"score_mode\": \"sum\",\r\n \"boost_mode\": \"replace\",\r\n \"query\": {\r\n \"bool\": {\r\n \"must\": [\r\n {\r\n \"range\": {\r\n \"opening_date\": {\r\n \"gte\": 1481624898000\r\n }\r\n }\r\n },\r\n {\r\n \"constant_score\": {\r\n \"filter\": {\r\n \"match_all\": {}\r\n },\r\n \"boost\": 200\r\n }\r\n }\r\n ]\r\n }\r\n },\r\n \"functions\": [\r\n {\r\n \"filter\": {\r\n \"match_all\": {}\r\n },\r\n \"weight\": 3\r\n }\r\n ]\r\n }\r\n }\r\n}\r\n```\r\n\r\nThe explanation from the above starts with:\r\n\r\n```\r\n \"_explanation\": {\r\n \"value\": 603,\r\n \"description\": \"function score, product of:\",\r\n```\r\n\r\neven though the function score says `boost_mode: sum, score_mode: sum`.\r\n\r\nif you change the range query to run on `opening_date` instead of `opening_date_long`, then the explanation changes to:\r\n\r\n```\r\n \"_explanation\": {\r\n \"value\": 3,\r\n \"description\": \"min of:\",\r\n \"details\": [\r\n```\r\n\r\nwhich doesn't seem right either", "created_at": "2016-12-14T12:17:35Z" }, { "body": "@jimczi could you take a look at this please?", "created_at": "2016-12-14T12:18:16Z" }, { "body": "btw @hrahal - regardless of this bug, I would definitely not try to solve your problem using parent-child. You're essentially trying to impose a relational design onto elasticsearch, and it is bound to fail. Instead, denormalise your data.", "created_at": "2016-12-14T12:25:21Z" }, { "body": "This one is funky. In 5.x we added a rewrite method on the query builders (like the one for Lucene query) in order to be able to optimize some queries at the ES level. In this example you have a range query on a date field and since the index contains only one document the range query builder is rewritten in a match all docs query. When the inner query of the FunctionScoreQueryBuilder is rewritten we create a new object that contains the new query plus the other parameters of the FunctionScoreQueryBuilder. 
The `boost_mode` was just ignored during the copy so the default value (which is SUM) is applied.\r\nI opened https://github.com/elastic/elasticsearch/issues/22172 to fix this issue.", "created_at": "2016-12-14T16:10:53Z" } ], "number": 22138, "title": "filtering a date of a child, breaks boost_mode on the parent level" }
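The root cause described in the comment above — the rewrite step builds a fresh builder and copies every parameter except `boost_mode` — is easy to reproduce outside Elasticsearch. The sketch below is a hypothetical, self-contained illustration of that bug class (the class and field names are invented, not the actual `FunctionScoreQueryBuilder` code): a `rewrite()` that forgets one field silently falls back to that field's default, which is exactly why "product of" shows up in the explain output despite `"boost_mode": "sum"`. The one-line fix in the PR that follows is the equivalent of the commented-out copy.

```java
// Hypothetical illustration of the bug class described above: a builder whose
// rewrite() clones itself but forgets to copy one optional setting.
import java.util.Objects;

final class ToyFunctionScoreBuilder {
    private final String query;           // stands in for the wrapped inner query
    private String boostMode = "multiply"; // the default, used when nothing is copied
    private float maxBoost = Float.MAX_VALUE;

    ToyFunctionScoreBuilder(String query) {
        this.query = query;
    }

    ToyFunctionScoreBuilder boostMode(String boostMode) {
        this.boostMode = boostMode;
        return this;
    }

    /** Simulates query rewriting: if the inner query can be simplified, return a new builder. */
    ToyFunctionScoreBuilder rewrite() {
        String rewrittenQuery = query.trim(); // pretend the inner query was simplified
        if (!rewrittenQuery.equals(query)) {
            ToyFunctionScoreBuilder copy = new ToyFunctionScoreBuilder(rewrittenQuery);
            copy.maxBoost = maxBoost;
            // BUG: boostMode is not copied, so the rewritten builder silently
            // falls back to the "multiply" default -- the symptom in the report.
            // FIX: copy.boostMode = boostMode;
            return copy;
        }
        return this;
    }

    @Override
    public String toString() {
        return "boost_mode=" + boostMode + ", max_boost=" + maxBoost + ", query=" + query;
    }

    public static void main(String[] args) {
        ToyFunctionScoreBuilder original =
                new ToyFunctionScoreBuilder("  match_all  ").boostMode("sum");
        ToyFunctionScoreBuilder rewritten = original.rewrite();
        System.out.println("original : " + original);
        System.out.println("rewritten: " + rewritten); // boost_mode is back to "multiply"
        System.out.println("boost_mode preserved? "
                + Objects.equals("sum", rewritten.boostMode));
    }
}
```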
{ "body": "This change fixes the cloning of the FunctionScoreQueryBuilder when the inner query or functions are rewritten.\r\n\r\nFixes #22138", "number": 22172, "review_comments": [], "title": "Fix boost_mode propagation when the function score query builder is rewritten" }
{ "commits": [ { "message": "Fix boost_mode propagation when the function score query builder is rewritten\n\nThis change fixes the cloning of the FunctionScoreQueryBuilder when the inner query or functions are rewritten.\n\nFixes #22138" } ], "files": [ { "diff": "@@ -425,6 +425,7 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws\n newQueryBuilder.scoreMode = scoreMode;\n newQueryBuilder.minScore = minScore;\n newQueryBuilder.maxBoost = maxBoost;\n+ newQueryBuilder.boostMode = boostMode;\n return newQueryBuilder;\n }\n return this;", "filename": "core/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java", "status": "modified" }, { "diff": "@@ -654,11 +654,19 @@ public void testMustRewrite() throws IOException {\n }\n \n public void testRewrite() throws IOException {\n- FunctionScoreQueryBuilder functionScoreQueryBuilder = new FunctionScoreQueryBuilder(\n- new WrapperQueryBuilder(new TermQueryBuilder(\"foo\", \"bar\").toString()));\n+ FunctionScoreQueryBuilder functionScoreQueryBuilder =\n+ new FunctionScoreQueryBuilder(new WrapperQueryBuilder(new TermQueryBuilder(\"foo\", \"bar\").toString()))\n+ .boostMode(CombineFunction.REPLACE)\n+ .scoreMode(FiltersFunctionScoreQuery.ScoreMode.SUM)\n+ .setMinScore(1)\n+ .maxBoost(100);\n FunctionScoreQueryBuilder rewrite = (FunctionScoreQueryBuilder) functionScoreQueryBuilder.rewrite(createShardContext());\n assertNotSame(functionScoreQueryBuilder, rewrite);\n assertEquals(rewrite.query(), new TermQueryBuilder(\"foo\", \"bar\"));\n+ assertEquals(rewrite.boostMode(), CombineFunction.REPLACE);\n+ assertEquals(rewrite.scoreMode(), FiltersFunctionScoreQuery.ScoreMode.SUM);\n+ assertEquals(rewrite.getMinScore(), 1f, 0.0001);\n+ assertEquals(rewrite.maxBoost(), 100f, 0.0001);\n }\n \n public void testRewriteWithFunction() throws IOException {", "filename": "core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java", "status": "modified" } ] }
{ "body": "**Elasticsearch version**:5.1.1\r\n\r\nBelow are two documents of type 'book' which have the fields title, year and yearint. \r\n\r\n```\r\nPUT filteryear\r\n{\r\n \"settings\": {\r\n \"number_of_shards\": 1\r\n },\r\n \"mappings\": {\r\n \"book\": {\r\n \"properties\": {\r\n \"title\": {\r\n \"type\": \"keyword\"\r\n },\r\n \"year\": {\r\n \"type\": \"keyword\"\r\n }, \r\n \"yearint\": {\r\n \"type\": \"integer\"\r\n }\r\n \r\n }\r\n }\r\n }\r\n}\r\n```\r\nDocuments:\r\n\r\n```\r\nPUT filteryear/book/1\r\n{\r\n\"title\": \"Banana\",\r\n\"year\": \"2007\",\r\n\"yearint\": 2007\r\n}\r\n\r\nPUT filteryear/book/2\r\n{\r\n\"title\": \"Mango\",\r\n\"year\": \"2008\",\r\n\"yearint\": 2008\r\n}\r\n```\r\n\r\nThe following is a term aggregation on the field \"year\" (mapped as String).\r\n\r\n```\r\nPOST filteryear/book/_search\r\n{\r\n \"from\" : 0,\r\n \"size\" : 0,\r\n \"query\" : {\r\n \"bool\" : {\r\n }\r\n },\r\n \"aggregations\" : {\r\n\"YearsSelected\" : {\r\n \"filter\" : {\r\n \"bool\" : {\r\n \"disable_coord\" : false,\r\n \"adjust_pure_negative\" : true,\r\n \"boost\" : 1.0\r\n }\r\n },\r\n \"aggregations\" : {\r\n \"termFieldAggregation\" : {\r\n \"terms\" : {\r\n \"field\" : \"year\",\r\n \"size\" : 2,\r\n \"shard_size\" : -1,\r\n \"min_doc_count\" : 0,\r\n \"shard_min_doc_count\" : 0,\r\n \"show_term_doc_count_error\" : false,\r\n \"include\" : [\r\n \"2007\"\r\n ]\r\n }\r\n }\r\n }\r\n },\r\n \"YearsNonSelected\" : {\r\n \"filter\" : {\r\n \"bool\" : {\r\n \"disable_coord\" : false,\r\n \"adjust_pure_negative\" : true,\r\n \"boost\" : 1.0\r\n }\r\n },\r\n \"aggregations\" : {\r\n \"termFieldAggregation\" : {\r\n \"terms\" : {\r\n \"field\" : \"year\",\r\n \"size\" : 5,\r\n \"shard_size\" : -1,\r\n \"min_doc_count\" : 1,\r\n \"shard_min_doc_count\" : 0,\r\n \"show_term_doc_count_error\" : false,\r\n \"exclude\" : [\r\n \"2007\"\r\n ]\r\n }\r\n }\r\n }\r\n }\r\n },\r\n \"ext\" : { }\r\n}\r\n```\r\n\r\nResult:\r\n\r\n```\r\n{\r\n \"took\": 1,\r\n \"timed_out\": false,\r\n \"_shards\": {\r\n \"total\": 1,\r\n \"successful\": 1,\r\n \"failed\": 0\r\n },\r\n \"hits\": {\r\n \"total\": 2,\r\n \"max_score\": 0,\r\n \"hits\": []\r\n },\r\n \"aggregations\": {\r\n \"YearsSelected\": {\r\n \"doc_count\": 2,\r\n \"termFieldAggregation\": {\r\n \"doc_count_error_upper_bound\": 0,\r\n \"sum_other_doc_count\": 0,\r\n \"buckets\": [\r\n {\r\n \"key\": \"2007\",\r\n \"doc_count\": 1\r\n }\r\n ]\r\n }\r\n },\r\n \"YearsNonSelected\": {\r\n \"doc_count\": 2,\r\n \"termFieldAggregation\": {\r\n \"doc_count_error_upper_bound\": 0,\r\n \"sum_other_doc_count\": 0,\r\n \"buckets\": [\r\n {\r\n \"key\": \"2008\",\r\n \"doc_count\": 1\r\n }\r\n ]\r\n }\r\n }\r\n }\r\n}\r\n```\r\nAs you can see the \"YearsSelected\" aggregation contains only the bucket, which was specified in the include (\"2007\"). 
\"YearsNonSelected\" aggregation contains all years except of \"2007\".\r\n\r\n\r\nNow the same aggregation, but on the field \"yearint\" (mapped as integer).\r\n\r\n```\r\nPOST filteryear/book/_search\r\n{\r\n \"from\" : 0,\r\n \"size\" : 0,\r\n \"query\" : {\r\n \"bool\" : {\r\n }\r\n },\r\n \"aggregations\" : {\r\n\"YearsSelected\" : {\r\n \"filter\" : {\r\n \"bool\" : {\r\n \"disable_coord\" : false,\r\n \"adjust_pure_negative\" : true,\r\n \"boost\" : 1.0\r\n }\r\n },\r\n \"aggregations\" : {\r\n \"termFieldAggregation\" : {\r\n \"terms\" : {\r\n \"field\" : \"yearint\",\r\n \"size\" : 2,\r\n \"shard_size\" : -1,\r\n \"min_doc_count\" : 0,\r\n \"shard_min_doc_count\" : 0,\r\n \"show_term_doc_count_error\" : false,\r\n \"order\" : [\r\n {\r\n \"_term\" : \"desc\"\r\n }\r\n ],\r\n \"include\" : [\r\n \"2007\"\r\n ]\r\n }\r\n }\r\n }\r\n },\r\n \"YearsNonSelected\" : {\r\n \"filter\" : {\r\n \"bool\" : {\r\n \"disable_coord\" : false,\r\n \"adjust_pure_negative\" : true,\r\n \"boost\" : 1.0\r\n }\r\n },\r\n \"aggregations\" : {\r\n \"termFieldAggregation\" : {\r\n \"terms\" : {\r\n \"field\" : \"yearint\",\r\n \"size\" : 5,\r\n \"shard_size\" : -1,\r\n \"min_doc_count\" : 1,\r\n \"shard_min_doc_count\" : 0,\r\n \"show_term_doc_count_error\" : false,\r\n \"order\" : [\r\n {\r\n \"_term\" : \"desc\"\r\n }\r\n ],\r\n \"exclude\" : [\r\n \"2007\"\r\n ]\r\n }\r\n }\r\n }\r\n }\r\n },\r\n \"ext\" : { }\r\n}\r\n```\r\n\r\nResult: \r\n\r\n```\r\n{\r\n \"took\": 1,\r\n \"timed_out\": false,\r\n \"_shards\": {\r\n \"total\": 1,\r\n \"successful\": 1,\r\n \"failed\": 0\r\n },\r\n \"hits\": {\r\n \"total\": 2,\r\n \"max_score\": 0,\r\n \"hits\": []\r\n },\r\n \"aggregations\": {\r\n \"YearsSelected\": {\r\n \"doc_count\": 2,\r\n \"termFieldAggregation\": {\r\n \"doc_count_error_upper_bound\": 0,\r\n \"sum_other_doc_count\": 0,\r\n \"buckets\": [\r\n {\r\n \"key\": 2008,\r\n \"doc_count\": 0\r\n },\r\n {\r\n \"key\": 2007,\r\n \"doc_count\": 1\r\n }\r\n ]\r\n }\r\n },\r\n \"YearsNonSelected\": {\r\n \"doc_count\": 2,\r\n \"termFieldAggregation\": {\r\n \"doc_count_error_upper_bound\": 0,\r\n \"sum_other_doc_count\": 0,\r\n \"buckets\": [\r\n {\r\n \"key\": 2008,\r\n \"doc_count\": 1\r\n }\r\n ]\r\n }\r\n }\r\n }\r\n}\r\n```\r\n\r\nThe \"YearsSelected\" aggregation now contains two buckets \"2007\"and \"2008\", although it should only contain the included term \"2007\".\r\n\r\nThe only difference is the type of the field I use for the aggregation, shouldn't the result be the same? \r\n\r\nBtw, it doesn't make any difference if we specify 2007 or \"2007\" in the include.\r\n\r\nMy current fix is to use min_doc_count = 1, so that all buckets with docCount=0 are excluded.", "comments": [ { "body": "Thanks @becjon . This is indeed a bug, I opened https://github.com/elastic/elasticsearch/issues/22141", "created_at": "2016-12-13T14:10:21Z" } ], "number": 22140, "title": "Include in term aggregation not working on integer field" }
{ "body": "For minDocCount=0 the numeric terms aggregator should also check the includes/excludes when buckets with empty count are added to the result.\r\nThis change fixes this bug and adds a test for it.\r\n\r\nFixes #22140", "number": 22141, "review_comments": [], "title": "Fix numeric terms aggregations with includes/excludes and minDocCount=0" }
{ "commits": [ { "message": "Fix numeric terms aggregations with includes/excludes and minDocCount=0\n\nFor minDocCount=0 the numeric terms aggregator should also check the includes/excludes when buckets with empty count are added to the result.\nThis change fixes this bug and adds a test for it.\n\nFixes #22140" }, { "message": "Fix line length" } ], "files": [ { "diff": "@@ -113,7 +113,10 @@ public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOE\n values.setDocument(docId);\n final int valueCount = values.count();\n for (int i = 0; i < valueCount; ++i) {\n- bucketOrds.add(values.valueAt(i));\n+ long value = values.valueAt(i);\n+ if (longFilter == null || longFilter.accept(value)) {\n+ bucketOrds.add(value);\n+ }\n }\n }\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java", "status": "modified" }, { "diff": "@@ -305,41 +305,53 @@ public void testSingleValueFieldWithFiltering() throws Exception {\n long includes[] = { 1, 2, 3, 98 };\n long excludes[] = { -1, 2, 4 };\n long empty[] = {};\n- testIncludeExcludeResults(includes, empty, new long[] { 1, 2, 3 });\n- testIncludeExcludeResults(includes, excludes, new long[] { 1, 3 });\n- testIncludeExcludeResults(empty, excludes, new long[] { 0, 1, 3 });\n+ testIncludeExcludeResults(1, includes, empty, new long[] { 1, 2, 3 }, new long[0]);\n+ testIncludeExcludeResults(1, includes, excludes, new long[] { 1, 3 }, new long[0]);\n+ testIncludeExcludeResults(1, empty, excludes, new long[] { 0, 1, 3 }, new long[0]);\n+\n+ testIncludeExcludeResults(0, includes, empty, new long[] { 1, 2, 3}, new long[] { 98 });\n+ testIncludeExcludeResults(0, includes, excludes, new long[] { 1, 3 }, new long[] { 98 });\n+ testIncludeExcludeResults(0, empty, excludes, new long[] { 0, 1, 3 }, new long[] {5, 6, 7, 8, 9, 10, 11});\n }\n \n- private void testIncludeExcludeResults(long[] includes, long[] excludes, long[] expecteds) {\n+ private void testIncludeExcludeResults(int minDocCount, long[] includes, long[] excludes,\n+ long[] expectedWithCounts, long[] expectedZeroCounts) {\n SearchResponse response = client().prepareSearch(\"idx\").setTypes(\"type\")\n .addAggregation(terms(\"terms\")\n .field(SINGLE_VALUED_FIELD_NAME)\n .includeExclude(new IncludeExclude(includes, excludes))\n- .collectMode(randomFrom(SubAggCollectionMode.values())))\n+ .collectMode(randomFrom(SubAggCollectionMode.values()))\n+ .minDocCount(minDocCount))\n .execute().actionGet();\n assertSearchResponse(response);\n Terms terms = response.getAggregations().get(\"terms\");\n assertThat(terms, notNullValue());\n assertThat(terms.getName(), equalTo(\"terms\"));\n- assertThat(terms.getBuckets().size(), equalTo(expecteds.length));\n+ assertThat(terms.getBuckets().size(), equalTo(expectedWithCounts.length + expectedZeroCounts.length));\n \n- for (int i = 0; i < expecteds.length; i++) {\n- Terms.Bucket bucket = terms.getBucketByKey(\"\" + expecteds[i]);\n+ for (int i = 0; i < expectedWithCounts.length; i++) {\n+ Terms.Bucket bucket = terms.getBucketByKey(\"\" + expectedWithCounts[i]);\n assertThat(bucket, notNullValue());\n assertThat(bucket.getDocCount(), equalTo(1L));\n }\n+\n+ for (int i = 0; i < expectedZeroCounts.length; i++) {\n+ Terms.Bucket bucket = terms.getBucketByKey(\"\" + expectedZeroCounts[i]);\n+ assertThat(bucket, notNullValue());\n+ assertThat(bucket.getDocCount(), equalTo(0L));\n+ }\n }\n- \n- \n- \n+\n+\n+\n public void testSingleValueFieldWithPartitionedFiltering() throws Exception {\n 
runTestFieldWithPartitionedFiltering(SINGLE_VALUED_FIELD_NAME);\n }\n- \n+\n public void testMultiValueFieldWithPartitionedFiltering() throws Exception {\n runTestFieldWithPartitionedFiltering(MULTI_VALUED_FIELD_NAME);\n }\n- \n+\n private void runTestFieldWithPartitionedFiltering(String field) throws Exception {\n // Find total number of unique terms\n SearchResponse allResponse = client().prepareSearch(\"idx\").setTypes(\"type\")\n@@ -348,8 +360,8 @@ private void runTestFieldWithPartitionedFiltering(String field) throws Exception\n Terms terms = allResponse.getAggregations().get(\"terms\");\n assertThat(terms, notNullValue());\n assertThat(terms.getName(), equalTo(\"terms\"));\n- int expectedCardinality = terms.getBuckets().size(); \n- \n+ int expectedCardinality = terms.getBuckets().size();\n+\n // Gather terms using partitioned aggregations\n final int numPartitions = randomIntBetween(2, 4);\n Set<Number> foundTerms = new HashSet<>();\n@@ -368,9 +380,9 @@ private void runTestFieldWithPartitionedFiltering(String field) throws Exception\n foundTerms.add(bucket.getKeyAsNumber());\n }\n }\n- assertEquals(expectedCardinality, foundTerms.size()); \n+ assertEquals(expectedCardinality, foundTerms.size());\n }\n- \n+\n \n public void testSingleValueFieldWithMaxSize() throws Exception {\n SearchResponse response = client().prepareSearch(\"idx\").setTypes(\"high_card_type\")", "filename": "core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java", "status": "modified" } ] }
{ "body": "Obviously the following needs work (porting from 1.x)... But this error is just unreal. I'm trying to provide a default value for a terms field: https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html and https://www.elastic.co/guide/en/elasticsearch/reference/current/boolean.html\r\n\r\n```\r\nPUT test \r\n{\r\n \"mappings\": {\r\n \"test\": {\r\n \"properties\": {\r\n \"is_first_occurrence\": {\r\n \"type\": \"boolean\"\r\n }\r\n }\r\n }\r\n }\r\n}\r\n\r\nPOST /test/test/_search\r\n{\r\n \"size\": 0,\r\n \"aggs\": {\r\n \"terms_is_first_occurrence\": {\r\n \"terms\": {\r\n \"field\": \"is_first_occurrence\",\r\n \"missing\": \"false\"\r\n }\r\n }\r\n }\r\n}\r\n```\r\n\r\nReturns the following error a very large percentage of the time, otherwise it succeeds\r\n```json\r\n {\r\n \"error\": {\r\n \"root_cause\": [\r\n {\r\n \"type\": \"illegal_argument_exception\",\r\n \"reason\": \"Field [is_first_occurrence] of type [boolean] does not support custom time zones\"\r\n }\r\n ],\r\n \"type\": \"search_phase_execution_exception\",\r\n \"reason\": \"all shards failed\",\r\n \"phase\": \"query\",\r\n \"grouped\": true,\r\n \"failed_shards\": [\r\n {\r\n \"shard\": 0,\r\n \"index\": \"test\",\r\n \"node\": \"qCyn3KxuRCS2cUBxDMbz2g\",\r\n \"reason\": {\r\n \"type\": \"illegal_argument_exception\",\r\n \"reason\": \"Field [is_first_occurrence] of type [boolean] does not support custom time zones\"\r\n }\r\n }\r\n ],\r\n \"caused_by\": {\r\n \"type\": \"illegal_argument_exception\",\r\n \"reason\": \"Field [is_first_occurrence] of type [boolean] does not support custom time zones\"\r\n }\r\n },\r\n \"status\": 400\r\n}\r\n```", "comments": [ { "body": "Seems specifying any value for `missing` doesn't work. I also tried `\"include\": 1` and that blew up with \r\n\r\n```json\r\n{\r\n \"error\": {\r\n \"root_cause\": [\r\n {\r\n \"type\": \"parsing_exception\",\r\n \"reason\": \"Unexpected token VALUE_NUMBER [include] in [terms_is_first_occurrence].\",\r\n \"line\": 7,\r\n \"col\": 20\r\n }\r\n ],\r\n \"type\": \"parsing_exception\",\r\n \"reason\": \"Unexpected token VALUE_NUMBER [include] in [terms_is_first_occurrence].\",\r\n \"line\": 7,\r\n \"col\": 20\r\n },\r\n \"status\": 400\r\n}`\r\n```\r\n\r\nDoing `\"include\": true` gives\r\n\r\n```json\r\n{\r\n \"error\": {\r\n \"root_cause\": [\r\n {\r\n \"type\": \"parsing_exception\",\r\n \"reason\": \"Unexpected token VALUE_BOOLEAN [include] in [terms_is_first_occurrence].\",\r\n \"line\": 7,\r\n \"col\": 20\r\n }\r\n ],\r\n \"type\": \"parsing_exception\",\r\n \"reason\": \"Unexpected token VALUE_BOOLEAN [include] in [terms_is_first_occurrence].\",\r\n \"line\": 7,\r\n \"col\": 20\r\n },\r\n \"status\": 400\r\n}\r\n```", "created_at": "2016-12-06T21:13:12Z" }, { "body": "Thanks for the bug report. I can reproduce the issue that you describe.", "created_at": "2016-12-07T10:09:51Z" }, { "body": "Also hit this bug, we see it for text fields and boolean fields. Really confusing issue.", "created_at": "2016-12-07T20:24:05Z" }, { "body": "Hit this bug as well. 
We discovered it does not throw an error with `keywordField` types.\r\n\r\n \"aggregations\" : {\r\n \"statuses\" : {\r\n \"terms\" : {\r\n \"field\" : \"myIntField\",\r\n \"missing\" : \"other\"\r\n }\r\n }\r\n\r\nreturns\r\n\r\n \"error\": {\r\n \"root_cause\": [\r\n {\r\n \"type\": \"illegal_argument_exception\",\r\n \"reason\": \"Field [myIntField] of type [integer] does not support custom time zones\"\r\n }\r\n ],\r\n \"type\": \"search_phase_execution_exception\",\r\n \"reason\": \"all shards failed\",\r\n \"phase\": \"query\",\r\n \"grouped\": true,\r\n \"failed_shards\": [\r\n {\r\n \"shard\": 0,\r\n \"index\": \"production_index\",\r\n \"node\": \"myNode\",\r\n \"reason\": {\r\n \"type\": \"illegal_argument_exception\",\r\n \"reason\": \"Field [myIntField] of type [integer] does not support custom time zones\"\r\n }\r\n }\r\n ],\r\n \"caused_by\": {\r\n \"type\": \"illegal_argument_exception\",\r\n \"reason\": \"Field [myIntField] of type [integer] does not support custom time zones\"\r\n }\r\n },\r\n \"status\": 400\r\n }", "created_at": "2016-12-07T20:39:58Z" } ], "number": 22009, "title": "5.0.2 - Terms Aggregate Field [is_first_occurrence] of type [boolean] does not support custom time zones" }
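The confusing "does not support custom time zones" error comes from the code path that turns the `missing` value into a number: as the removed lines in the PR below show, it always went through the field's doc-value format with an explicit UTC time zone, and every format except the date one rejects a time zone argument. The sketch below is a hypothetical, stripped-down illustration of the idea behind the fix — parse `missing` with the parser that belongs to the field type, with no time-zone parameter anywhere; it does not use the real `DocValueFormat` classes.

```java
import java.time.Instant;
import java.util.function.Function;

// Toy illustration: each field type owns the parser for its "missing" value.
public class MissingValueSketch {

    enum FieldType {
        LONG(s -> (double) Long.parseLong(s)),
        DOUBLE(Double::parseDouble),
        BOOLEAN(s -> Boolean.parseBoolean(s) ? 1.0 : 0.0),
        DATE(s -> (double) Instant.parse(s).toEpochMilli());

        private final Function<String, Double> parser;

        FieldType(Function<String, Double> parser) {
            this.parser = parser;
        }

        double parseMissing(String missing) {
            return parser.apply(missing);
        }
    }

    public static void main(String[] args) {
        // "false" on a boolean field becomes 0.0 instead of tripping a
        // date-only code path that rejects time zones.
        System.out.println(FieldType.BOOLEAN.parseMissing("false"));  // 0.0
        System.out.println(FieldType.LONG.parseMissing("2007"));      // 2007.0
        System.out.println(FieldType.DATE.parseMissing("2016-12-13T00:00:00Z"));
    }
}
```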
{ "body": "The creation of the `ValuesSource` used to pass `DateTimeZone.UTC` as a time\r\nzone all the time in case of empty fields in spite of the fact that all doc\r\nvalue formats but the date one reject this parameter.\r\n\r\nThis commit centralizes the creation of the `ValuesSource` and adds unit tests\r\nto it.\r\n\r\nCloses #22009", "number": 22135, "review_comments": [], "title": "Fix `missing` on aggs on `boolean` fields." }
{ "commits": [ { "message": "Fix `missing` on aggs on `boolean` fields.\n\nThe creation of the `ValuesSource` used to pass `DateTimeZone.UTC` as a time\nzone all the time in case of empty fields in spite of the fact that all doc\nvalue formats but the date one reject this parameter.\n\nThis commit centralizes the creation of the `ValuesSource` and adds unit tests\nto it.\n\nCloses #22009" } ], "files": [ { "diff": "@@ -262,7 +262,7 @@ public long parseLong(String value, boolean roundUp, LongSupplier now) {\n \n @Override\n public double parseDouble(String value, boolean roundUp, LongSupplier now) {\n- throw new UnsupportedOperationException();\n+ return parseLong(value, roundUp, now);\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/search/DocValueFormat.java", "status": "modified" }, { "diff": "@@ -68,6 +68,7 @@ public boolean isNumeric() {\n }\n },\n IP((byte) 6, \"ip\", \"ip\", ValuesSourceType.BYTES, IndexFieldData.class, DocValueFormat.IP),\n+ // TODO: what is the difference between \"number\" and \"numeric\"?\n NUMERIC((byte) 7, \"numeric\", \"numeric\", ValuesSourceType.NUMERIC, IndexNumericFieldData.class, DocValueFormat.RAW) {\n @Override\n public boolean isNumeric() {\n@@ -79,6 +80,12 @@ public boolean isNumeric() {\n public boolean isGeoPoint() {\n return true;\n }\n+ },\n+ BOOLEAN((byte) 9, \"boolean\", \"boolean\", ValuesSourceType.NUMERIC, IndexNumericFieldData.class, DocValueFormat.BOOLEAN) {\n+ @Override\n+ public boolean isNumeric() {\n+ return super.isNumeric();\n+ }\n };\n \n final String description;\n@@ -150,7 +157,9 @@ public static ValueType resolveForScript(String type) {\n case \"byte\": return LONG;\n case \"date\": return DATE;\n case \"ip\": return IP;\n+ case \"boolean\": return BOOLEAN;\n default:\n+ // TODO: do not be lenient here\n return null;\n }\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java", "status": "modified" }, { "diff": "@@ -18,20 +18,12 @@\n */\n package org.elasticsearch.search.aggregations.support;\n \n-import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n-import org.elasticsearch.index.fielddata.IndexFieldData;\n-import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;\n-import org.elasticsearch.index.fielddata.IndexNumericFieldData;\n-import org.elasticsearch.index.mapper.MappedFieldType;\n import org.elasticsearch.script.Script;\n-import org.elasticsearch.script.ScriptContext;\n-import org.elasticsearch.script.SearchScript;\n-import org.elasticsearch.search.DocValueFormat;\n-import org.elasticsearch.search.aggregations.AggregationInitializationException;\n import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;\n+import org.elasticsearch.search.aggregations.AggregationInitializationException;\n import org.elasticsearch.search.aggregations.AggregatorFactories;\n import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;\n import org.elasticsearch.search.aggregations.AggregatorFactory;\n@@ -298,98 +290,14 @@ public DateTimeZone timeZone() {\n }\n \n protected ValuesSourceConfig<VS> resolveConfig(SearchContext context) {\n- ValuesSourceConfig<VS> config = config(context);\n- return config;\n+ ValueType valueType = this.valueType != null ? 
this.valueType : targetValueType;\n+ return ValuesSourceConfig.resolve(context.getQueryShardContext(),\n+ valueType, field, script, missing, timeZone, format);\n }\n \n protected abstract ValuesSourceAggregatorFactory<VS, ?> innerBuild(SearchContext context, ValuesSourceConfig<VS> config,\n AggregatorFactory<?> parent, AggregatorFactories.Builder subFactoriesBuilder) throws IOException;\n \n- public ValuesSourceConfig<VS> config(SearchContext context) {\n-\n- ValueType valueType = this.valueType != null ? this.valueType : targetValueType;\n-\n- if (field == null) {\n- if (script == null) {\n- @SuppressWarnings(\"unchecked\")\n- ValuesSourceConfig<VS> config = new ValuesSourceConfig(ValuesSourceType.ANY);\n- config.format(resolveFormat(null, valueType));\n- return config;\n- }\n- ValuesSourceType valuesSourceType = valueType != null ? valueType.getValuesSourceType() : this.valuesSourceType;\n- if (valuesSourceType == null || valuesSourceType == ValuesSourceType.ANY) {\n- // the specific value source type is undefined, but for scripts,\n- // we need to have a specific value source\n- // type to know how to handle the script values, so we fallback\n- // on Bytes\n- valuesSourceType = ValuesSourceType.BYTES;\n- }\n- ValuesSourceConfig<VS> config = new ValuesSourceConfig<VS>(valuesSourceType);\n- config.missing(missing);\n- config.timezone(timeZone);\n- config.format(resolveFormat(format, valueType));\n- config.script(createScript(script, context));\n- config.scriptValueType(valueType);\n- return config;\n- }\n-\n- MappedFieldType fieldType = context.smartNameFieldType(field);\n- if (fieldType == null) {\n- ValuesSourceType valuesSourceType = valueType != null ? valueType.getValuesSourceType() : this.valuesSourceType;\n- ValuesSourceConfig<VS> config = new ValuesSourceConfig<>(valuesSourceType);\n- config.missing(missing);\n- config.timezone(timeZone);\n- config.format(resolveFormat(format, valueType));\n- config.unmapped(true);\n- if (valueType != null) {\n- // todo do we really need this for unmapped?\n- config.scriptValueType(valueType);\n- }\n- return config;\n- }\n-\n- IndexFieldData<?> indexFieldData = context.fieldData().getForField(fieldType);\n-\n- ValuesSourceConfig<VS> config;\n- if (valuesSourceType == ValuesSourceType.ANY) {\n- if (indexFieldData instanceof IndexNumericFieldData) {\n- config = new ValuesSourceConfig<>(ValuesSourceType.NUMERIC);\n- } else if (indexFieldData instanceof IndexGeoPointFieldData) {\n- config = new ValuesSourceConfig<>(ValuesSourceType.GEOPOINT);\n- } else {\n- config = new ValuesSourceConfig<>(ValuesSourceType.BYTES);\n- }\n- } else {\n- config = new ValuesSourceConfig(valuesSourceType);\n- }\n-\n- config.fieldContext(new FieldContext(field, indexFieldData, fieldType));\n- config.missing(missing);\n- config.timezone(timeZone);\n- config.script(createScript(script, context));\n- config.format(fieldType.docValueFormat(format, timeZone));\n- return config;\n- }\n-\n- private SearchScript createScript(Script script, SearchContext context) {\n- if (script == null) {\n- return null;\n- } else {\n- return context.getQueryShardContext().getSearchScript(script, ScriptContext.Standard.AGGS);\n- }\n- }\n-\n- private static DocValueFormat resolveFormat(@Nullable String format, @Nullable ValueType valueType) {\n- if (valueType == null) {\n- return DocValueFormat.RAW; // we can't figure it out\n- }\n- DocValueFormat valueFormat = valueType.defaultFormat;\n- if (valueFormat instanceof DocValueFormat.Decimal && format != null) {\n- valueFormat = new 
DocValueFormat.Decimal(format);\n- }\n- return valueFormat;\n- }\n-\n @Override\n public final XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException {\n builder.startObject();", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -49,7 +49,7 @@ public DateTimeZone timeZone() {\n @Override\n public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket,\n List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {\n- VS vs = config.toValuesSource(context);\n+ VS vs = config.toValuesSource(context.getQueryShardContext());\n if (vs == null) {\n return createUnmapped(parent, pipelineAggregators, metaData);\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregatorFactory.java", "status": "modified" }, { "diff": "@@ -27,17 +27,115 @@\n import org.elasticsearch.index.fielddata.IndexNumericFieldData;\n import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;\n import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;\n+import org.elasticsearch.index.mapper.MappedFieldType;\n+import org.elasticsearch.index.query.QueryShardContext;\n+import org.elasticsearch.script.Script;\n+import org.elasticsearch.script.ScriptContext;\n import org.elasticsearch.script.SearchScript;\n import org.elasticsearch.search.DocValueFormat;\n-import org.elasticsearch.search.SearchParseException;\n import org.elasticsearch.search.aggregations.AggregationExecutionException;\n-import org.elasticsearch.search.internal.SearchContext;\n import org.joda.time.DateTimeZone;\n \n import java.io.IOException;\n \n+/**\n+ * A configuration that tells aggregations how to retrieve data from the index\n+ * in order to run a specific aggregation.\n+ */\n public class ValuesSourceConfig<VS extends ValuesSource> {\n \n+ /**\n+ * Resolve a {@link ValuesSourceConfig} given configuration parameters.\n+ */\n+ public static <VS extends ValuesSource> ValuesSourceConfig<VS> resolve(\n+ QueryShardContext context,\n+ ValueType valueType,\n+ String field, Script script,\n+ Object missing,\n+ DateTimeZone timeZone,\n+ String format) {\n+\n+ if (field == null) {\n+ if (script == null) {\n+ @SuppressWarnings(\"unchecked\")\n+ ValuesSourceConfig<VS> config = new ValuesSourceConfig<>(ValuesSourceType.ANY);\n+ config.format(resolveFormat(null, valueType));\n+ return config;\n+ }\n+ ValuesSourceType valuesSourceType = valueType != null ? valueType.getValuesSourceType() : ValuesSourceType.ANY;\n+ if (valuesSourceType == ValuesSourceType.ANY) {\n+ // the specific value source type is undefined, but for scripts,\n+ // we need to have a specific value source\n+ // type to know how to handle the script values, so we fallback\n+ // on Bytes\n+ valuesSourceType = ValuesSourceType.BYTES;\n+ }\n+ ValuesSourceConfig<VS> config = new ValuesSourceConfig<VS>(valuesSourceType);\n+ config.missing(missing);\n+ config.timezone(timeZone);\n+ config.format(resolveFormat(format, valueType));\n+ config.script(createScript(script, context));\n+ config.scriptValueType(valueType);\n+ return config;\n+ }\n+\n+ MappedFieldType fieldType = context.fieldMapper(field);\n+ if (fieldType == null) {\n+ ValuesSourceType valuesSourceType = valueType != null ? 
valueType.getValuesSourceType() : ValuesSourceType.ANY;\n+ ValuesSourceConfig<VS> config = new ValuesSourceConfig<>(valuesSourceType);\n+ config.missing(missing);\n+ config.timezone(timeZone);\n+ config.format(resolveFormat(format, valueType));\n+ config.unmapped(true);\n+ if (valueType != null) {\n+ // todo do we really need this for unmapped?\n+ config.scriptValueType(valueType);\n+ }\n+ return config;\n+ }\n+\n+ IndexFieldData<?> indexFieldData = context.getForField(fieldType);\n+\n+ ValuesSourceConfig<VS> config;\n+ if (valueType == null) {\n+ if (indexFieldData instanceof IndexNumericFieldData) {\n+ config = new ValuesSourceConfig<>(ValuesSourceType.NUMERIC);\n+ } else if (indexFieldData instanceof IndexGeoPointFieldData) {\n+ config = new ValuesSourceConfig<>(ValuesSourceType.GEOPOINT);\n+ } else {\n+ config = new ValuesSourceConfig<>(ValuesSourceType.BYTES);\n+ }\n+ } else {\n+ config = new ValuesSourceConfig<>(valueType.getValuesSourceType());\n+ }\n+\n+ config.fieldContext(new FieldContext(field, indexFieldData, fieldType));\n+ config.missing(missing);\n+ config.timezone(timeZone);\n+ config.script(createScript(script, context));\n+ config.format(fieldType.docValueFormat(format, timeZone));\n+ return config;\n+ }\n+\n+ private static SearchScript createScript(Script script, QueryShardContext context) {\n+ if (script == null) {\n+ return null;\n+ } else {\n+ return context.getSearchScript(script, ScriptContext.Standard.AGGS);\n+ }\n+ }\n+\n+ private static DocValueFormat resolveFormat(@Nullable String format, @Nullable ValueType valueType) {\n+ if (valueType == null) {\n+ return DocValueFormat.RAW; // we can't figure it out\n+ }\n+ DocValueFormat valueFormat = valueType.defaultFormat;\n+ if (valueFormat instanceof DocValueFormat.Decimal && format != null) {\n+ valueFormat = new DocValueFormat.Decimal(format);\n+ }\n+ return valueFormat;\n+ }\n+\n private final ValuesSourceType valueSourceType;\n private FieldContext fieldContext;\n private SearchScript script;\n@@ -125,7 +223,7 @@ public DocValueFormat format() {\n /** Get a value source given its configuration. A return value of null indicates that\n * no value source could be built. 
*/\n @Nullable\n- public VS toValuesSource(SearchContext context) throws IOException {\n+ public VS toValuesSource(QueryShardContext context) throws IOException {\n if (!valid()) {\n throw new IllegalStateException(\n \"value source config is invalid; must have either a field context or a script or marked as unwrapped\");\n@@ -143,8 +241,7 @@ public VS toValuesSource(SearchContext context) throws IOException {\n } else if (valueSourceType() == ValuesSourceType.ANY || valueSourceType() == ValuesSourceType.BYTES) {\n vs = (VS) ValuesSource.Bytes.WithOrdinals.EMPTY;\n } else {\n- throw new SearchParseException(context, \"Can't deal with unmapped ValuesSource type \"\n- + valueSourceType(), null);\n+ throw new IllegalArgumentException(\"Can't deal with unmapped ValuesSource type \" + valueSourceType());\n }\n } else {\n vs = originalValuesSource();\n@@ -162,25 +259,15 @@ public VS toValuesSource(SearchContext context) throws IOException {\n return (VS) MissingValues.replaceMissing((ValuesSource.Bytes) vs, missing);\n }\n } else if (vs instanceof ValuesSource.Numeric) {\n- Number missing = null;\n- if (missing() instanceof Number) {\n- missing = (Number) missing();\n- } else {\n- if (fieldContext() != null && fieldContext().fieldType() != null) {\n- missing = fieldContext().fieldType().docValueFormat(null, DateTimeZone.UTC)\n- .parseDouble(missing().toString(), false, context.getQueryShardContext()::nowInMillis);\n- } else {\n- missing = Double.parseDouble(missing().toString());\n- }\n- }\n+ Number missing = format.parseDouble(missing().toString(), false, context::nowInMillis);\n return (VS) MissingValues.replaceMissing((ValuesSource.Numeric) vs, missing);\n } else if (vs instanceof ValuesSource.GeoPoint) {\n // TODO: also support the structured formats of geo points\n final GeoPoint missing = GeoUtils.parseGeoPoint(missing().toString(), new GeoPoint());\n return (VS) MissingValues.replaceMissing((ValuesSource.GeoPoint) vs, missing);\n } else {\n // Should not happen\n- throw new SearchParseException(context, \"Can't apply missing values on a \" + vs.getClass(), null);\n+ throw new IllegalArgumentException(\"Can't apply missing values on a \" + vs.getClass());\n }\n }\n ", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java", "status": "modified" }, { "diff": "@@ -0,0 +1,49 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.search.aggregations.support;\n+\n+import org.elasticsearch.test.ESTestCase;\n+\n+public class ValueTypeTests extends ESTestCase {\n+\n+ public void testResolve() {\n+ assertEquals(ValueType.STRING, ValueType.resolveForScript(\"string\"));\n+ assertEquals(ValueType.DOUBLE, ValueType.resolveForScript(\"float\"));\n+ assertEquals(ValueType.DOUBLE, ValueType.resolveForScript(\"double\"));\n+ assertEquals(ValueType.LONG, ValueType.resolveForScript(\"byte\"));\n+ assertEquals(ValueType.LONG, ValueType.resolveForScript(\"short\"));\n+ assertEquals(ValueType.LONG, ValueType.resolveForScript(\"integer\"));\n+ assertEquals(ValueType.LONG, ValueType.resolveForScript(\"long\"));\n+ assertEquals(ValueType.DATE, ValueType.resolveForScript(\"date\"));\n+ assertEquals(ValueType.IP, ValueType.resolveForScript(\"ip\"));\n+ assertEquals(ValueType.BOOLEAN, ValueType.resolveForScript(\"boolean\"));\n+ }\n+\n+ public void testCompatibility() {\n+ assertTrue(ValueType.DOUBLE.isA(ValueType.NUMERIC));\n+ assertTrue(ValueType.LONG.isA(ValueType.NUMERIC));\n+ assertTrue(ValueType.DATE.isA(ValueType.LONG));\n+ assertTrue(ValueType.NUMERIC.isA(ValueType.NUMBER));\n+ assertTrue(ValueType.BOOLEAN.isA(ValueType.NUMBER));\n+ assertFalse(ValueType.STRING.isA(ValueType.NUMBER));\n+ assertFalse(ValueType.DATE.isA(ValueType.IP));\n+ }\n+\n+}", "filename": "core/src/test/java/org/elasticsearch/search/aggregations/support/ValueTypeTests.java", "status": "added" }, { "diff": "@@ -0,0 +1,264 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.search.aggregations.support;\n+\n+import org.apache.lucene.index.LeafReaderContext;\n+import org.apache.lucene.index.SortedNumericDocValues;\n+import org.apache.lucene.util.BytesRef;\n+import org.elasticsearch.action.support.WriteRequest;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.index.IndexService;\n+import org.elasticsearch.index.engine.Engine.Searcher;\n+import org.elasticsearch.index.fielddata.SortedBinaryDocValues;\n+import org.elasticsearch.index.query.QueryShardContext;\n+import org.elasticsearch.test.ESSingleNodeTestCase;\n+\n+public class ValuesSourceConfigTests extends ESSingleNodeTestCase {\n+\n+ public void testKeyword() throws Exception {\n+ IndexService indexService = createIndex(\"index\", Settings.EMPTY, \"type\",\n+ \"bytes\", \"type=keyword\");\n+ client().prepareIndex(\"index\", \"type\", \"1\")\n+ .setSource(\"bytes\", \"abc\")\n+ .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)\n+ .get();\n+\n+ try (Searcher searcher = indexService.getShard(0).acquireSearcher(\"test\")) {\n+ QueryShardContext context = indexService.newQueryShardContext(0, searcher.reader(), () -> 42L);\n+\n+ ValuesSourceConfig<ValuesSource.Bytes> config = ValuesSourceConfig.resolve(\n+ context, null, \"bytes\", null, null, null, null);\n+ ValuesSource.Bytes valuesSource = config.toValuesSource(context);\n+ LeafReaderContext ctx = searcher.reader().leaves().get(0);\n+ SortedBinaryDocValues values = valuesSource.bytesValues(ctx);\n+ values.setDocument(0);\n+ assertEquals(1, values.count());\n+ assertEquals(new BytesRef(\"abc\"), values.valueAt(0));\n+ }\n+ }\n+\n+ public void testEmptyKeyword() throws Exception {\n+ IndexService indexService = createIndex(\"index\", Settings.EMPTY, \"type\",\n+ \"bytes\", \"type=keyword\");\n+ client().prepareIndex(\"index\", \"type\", \"1\")\n+ .setSource()\n+ .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)\n+ .get();\n+\n+ try (Searcher searcher = indexService.getShard(0).acquireSearcher(\"test\")) {\n+ QueryShardContext context = indexService.newQueryShardContext(0, searcher.reader(), () -> 42L);\n+\n+ ValuesSourceConfig<ValuesSource.Bytes> config = ValuesSourceConfig.resolve(\n+ context, null, \"bytes\", null, null, null, null);\n+ ValuesSource.Bytes valuesSource = config.toValuesSource(context);\n+ LeafReaderContext ctx = searcher.reader().leaves().get(0);\n+ SortedBinaryDocValues values = valuesSource.bytesValues(ctx);\n+ values.setDocument(0);\n+ assertEquals(0, values.count());\n+\n+ config = ValuesSourceConfig.resolve(\n+ context, null, \"bytes\", null, \"abc\", null, null);\n+ valuesSource = config.toValuesSource(context);\n+ values = valuesSource.bytesValues(ctx);\n+ values.setDocument(0);\n+ assertEquals(1, values.count());\n+ assertEquals(new BytesRef(\"abc\"), values.valueAt(0));\n+ }\n+ }\n+\n+ public void testUnmappedKeyword() throws Exception {\n+ IndexService indexService = createIndex(\"index\", Settings.EMPTY, \"type\");\n+ client().prepareIndex(\"index\", \"type\", \"1\")\n+ .setSource()\n+ .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)\n+ .get();\n+\n+ try (Searcher searcher = indexService.getShard(0).acquireSearcher(\"test\")) {\n+ QueryShardContext context = indexService.newQueryShardContext(0, searcher.reader(), () -> 42L);\n+ ValuesSourceConfig<ValuesSource.Bytes> config = ValuesSourceConfig.resolve(\n+ context, ValueType.STRING, \"bytes\", 
null, null, null, null);\n+ ValuesSource.Bytes valuesSource = config.toValuesSource(context);\n+ assertNull(valuesSource);\n+\n+ config = ValuesSourceConfig.resolve(\n+ context, ValueType.STRING, \"bytes\", null, \"abc\", null, null);\n+ valuesSource = config.toValuesSource(context);\n+ LeafReaderContext ctx = searcher.reader().leaves().get(0);\n+ SortedBinaryDocValues values = valuesSource.bytesValues(ctx);\n+ values.setDocument(0);\n+ assertEquals(1, values.count());\n+ assertEquals(new BytesRef(\"abc\"), values.valueAt(0));\n+ }\n+ }\n+\n+ public void testLong() throws Exception {\n+ IndexService indexService = createIndex(\"index\", Settings.EMPTY, \"type\",\n+ \"long\", \"type=long\");\n+ client().prepareIndex(\"index\", \"type\", \"1\")\n+ .setSource(\"long\", 42)\n+ .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)\n+ .get();\n+\n+ try (Searcher searcher = indexService.getShard(0).acquireSearcher(\"test\")) {\n+ QueryShardContext context = indexService.newQueryShardContext(0, searcher.reader(), () -> 42L);\n+\n+ ValuesSourceConfig<ValuesSource.Numeric> config = ValuesSourceConfig.resolve(\n+ context, null, \"long\", null, null, null, null);\n+ ValuesSource.Numeric valuesSource = config.toValuesSource(context);\n+ LeafReaderContext ctx = searcher.reader().leaves().get(0);\n+ SortedNumericDocValues values = valuesSource.longValues(ctx);\n+ values.setDocument(0);\n+ assertEquals(1, values.count());\n+ assertEquals(42, values.valueAt(0));\n+ }\n+ }\n+\n+ public void testEmptyLong() throws Exception {\n+ IndexService indexService = createIndex(\"index\", Settings.EMPTY, \"type\",\n+ \"long\", \"type=long\");\n+ client().prepareIndex(\"index\", \"type\", \"1\")\n+ .setSource()\n+ .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)\n+ .get();\n+\n+ try (Searcher searcher = indexService.getShard(0).acquireSearcher(\"test\")) {\n+ QueryShardContext context = indexService.newQueryShardContext(0, searcher.reader(), () -> 42L);\n+\n+ ValuesSourceConfig<ValuesSource.Numeric> config = ValuesSourceConfig.resolve(\n+ context, null, \"long\", null, null, null, null);\n+ ValuesSource.Numeric valuesSource = config.toValuesSource(context);\n+ LeafReaderContext ctx = searcher.reader().leaves().get(0);\n+ SortedNumericDocValues values = valuesSource.longValues(ctx);\n+ values.setDocument(0);\n+ assertEquals(0, values.count());\n+\n+ config = ValuesSourceConfig.resolve(\n+ context, null, \"long\", null, 42, null, null);\n+ valuesSource = config.toValuesSource(context);\n+ values = valuesSource.longValues(ctx);\n+ values.setDocument(0);\n+ assertEquals(1, values.count());\n+ assertEquals(42, values.valueAt(0));\n+ }\n+ }\n+\n+ public void testUnmappedLong() throws Exception {\n+ IndexService indexService = createIndex(\"index\", Settings.EMPTY, \"type\");\n+ client().prepareIndex(\"index\", \"type\", \"1\")\n+ .setSource()\n+ .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)\n+ .get();\n+\n+ try (Searcher searcher = indexService.getShard(0).acquireSearcher(\"test\")) {\n+ QueryShardContext context = indexService.newQueryShardContext(0, searcher.reader(), () -> 42L);\n+\n+ ValuesSourceConfig<ValuesSource.Numeric> config = ValuesSourceConfig.resolve(\n+ context, ValueType.NUMBER, \"long\", null, null, null, null);\n+ ValuesSource.Numeric valuesSource = config.toValuesSource(context);\n+ assertNull(valuesSource);\n+\n+ config = ValuesSourceConfig.resolve(\n+ context, ValueType.NUMBER, \"long\", null, 42, null, null);\n+ valuesSource = config.toValuesSource(context);\n+ LeafReaderContext ctx = 
searcher.reader().leaves().get(0);\n+ SortedNumericDocValues values = valuesSource.longValues(ctx);\n+ values.setDocument(0);\n+ assertEquals(1, values.count());\n+ assertEquals(42, values.valueAt(0));\n+ }\n+ }\n+\n+ public void testBoolean() throws Exception {\n+ IndexService indexService = createIndex(\"index\", Settings.EMPTY, \"type\",\n+ \"bool\", \"type=boolean\");\n+ client().prepareIndex(\"index\", \"type\", \"1\")\n+ .setSource(\"bool\", true)\n+ .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)\n+ .get();\n+\n+ try (Searcher searcher = indexService.getShard(0).acquireSearcher(\"test\")) {\n+ QueryShardContext context = indexService.newQueryShardContext(0, searcher.reader(), () -> 42L);\n+\n+ ValuesSourceConfig<ValuesSource.Numeric> config = ValuesSourceConfig.resolve(\n+ context, null, \"bool\", null, null, null, null);\n+ ValuesSource.Numeric valuesSource = config.toValuesSource(context);\n+ LeafReaderContext ctx = searcher.reader().leaves().get(0);\n+ SortedNumericDocValues values = valuesSource.longValues(ctx);\n+ values.setDocument(0);\n+ assertEquals(1, values.count());\n+ assertEquals(1, values.valueAt(0));\n+ }\n+ }\n+\n+ public void testEmptyBoolean() throws Exception {\n+ IndexService indexService = createIndex(\"index\", Settings.EMPTY, \"type\",\n+ \"bool\", \"type=boolean\");\n+ client().prepareIndex(\"index\", \"type\", \"1\")\n+ .setSource()\n+ .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)\n+ .get();\n+\n+ try (Searcher searcher = indexService.getShard(0).acquireSearcher(\"test\")) {\n+ QueryShardContext context = indexService.newQueryShardContext(0, searcher.reader(), () -> 42L);\n+\n+ ValuesSourceConfig<ValuesSource.Numeric> config = ValuesSourceConfig.resolve(\n+ context, null, \"bool\", null, null, null, null);\n+ ValuesSource.Numeric valuesSource = config.toValuesSource(context);\n+ LeafReaderContext ctx = searcher.reader().leaves().get(0);\n+ SortedNumericDocValues values = valuesSource.longValues(ctx);\n+ values.setDocument(0);\n+ assertEquals(0, values.count());\n+\n+ config = ValuesSourceConfig.resolve(\n+ context, null, \"bool\", null, true, null, null);\n+ valuesSource = config.toValuesSource(context);\n+ values = valuesSource.longValues(ctx);\n+ values.setDocument(0);\n+ assertEquals(1, values.count());\n+ assertEquals(1, values.valueAt(0));\n+ }\n+ }\n+\n+ public void testUnmappedBoolean() throws Exception {\n+ IndexService indexService = createIndex(\"index\", Settings.EMPTY, \"type\");\n+ client().prepareIndex(\"index\", \"type\", \"1\")\n+ .setSource()\n+ .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)\n+ .get();\n+\n+ try (Searcher searcher = indexService.getShard(0).acquireSearcher(\"test\")) {\n+ QueryShardContext context = indexService.newQueryShardContext(0, searcher.reader(), () -> 42L);\n+\n+ ValuesSourceConfig<ValuesSource.Numeric> config = ValuesSourceConfig.resolve(\n+ context, ValueType.BOOLEAN, \"bool\", null, null, null, null);\n+ ValuesSource.Numeric valuesSource = config.toValuesSource(context);\n+ assertNull(valuesSource);\n+\n+ config = ValuesSourceConfig.resolve(\n+ context, ValueType.BOOLEAN, \"bool\", null, true, null, null);\n+ valuesSource = config.toValuesSource(context);\n+ LeafReaderContext ctx = searcher.reader().leaves().get(0);\n+ SortedNumericDocValues values = valuesSource.longValues(ctx);\n+ values.setDocument(0);\n+ assertEquals(1, values.count());\n+ assertEquals(1, values.valueAt(0));\n+ }\n+ }\n+}", "filename": 
"core/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfigTests.java", "status": "added" }, { "diff": "@@ -49,7 +49,7 @@ public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBu\n HashMap<String, VS> valuesSources = new HashMap<>();\n \n for (Map.Entry<String, ValuesSourceConfig<VS>> config : configs.entrySet()) {\n- VS vs = config.getValue().toValuesSource(context);\n+ VS vs = config.getValue().toValuesSource(context.getQueryShardContext());\n if (vs != null) {\n valuesSources.put(config.getKey(), vs);\n }", "filename": "modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregatorFactory.java", "status": "modified" } ] }
{ "body": "This commit fixes the handling of spaces in Windows paths. The current\r\nmechanism works fine in a path that contains a single space, but fails\r\non a path that contains multiple spaces. With this commit, that is no\r\nlonger the case.\r\n\r\nRelates #20809, relates #21525", "comments": [ { "body": "I've manually tested this for both running Elasticsearch from the command line and the service, and with paths that contain zero, one, two, and three spaces and everything works as expected.", "created_at": "2016-12-01T20:15:28Z" }, { "body": "This LGTM. Should we consider (in a follow up) having our integ test dir which contains a space have two spaces (I did not realize it was possible to have different behavior for one vs two spaces).", "created_at": "2016-12-01T23:16:06Z" }, { "body": "> Should we consider (in a follow up) having our integ test dir which contains a space have two spaces (I did not realize it was possible to have different behavior for one vs two spaces)\r\n\r\nYes, I was thinking the same thing. Lacking packaging tests, that's test best that we can do but it's been really effective for us in the past.", "created_at": "2016-12-01T23:30:53Z" }, { "body": "Thanks @rjernst.", "created_at": "2016-12-02T01:41:54Z" }, { "body": "When I read this it seems the issue is not closed and included in a release package but when I download the elasticsearch zip from here https://www.elastic.co/downloads/elasticsearch it's already included. Did something went wrong?\r\n\r\nThis fix in not working for me. I tested the version 5.1.1 on windows 7 (x64) and windows server 2012 R2 (x64) standard and had the same issue every time.\r\n\r\nThe quote around the %JAVA_HOME% and %JAVA% variables are not placed correctly which prevent the scripts to start the node and to detect the JVM version which always default the service installation to the x86 version.\r\n\r\nMy JAVA_HOME variable is set with the value: C:\\Program Files\\Java\\jdk1.8.0_111\\\r\nMy installation folder for elasticsearch is: D:\\ELK\\elasticsearch\\\r\n\r\n######Result of the elasticsearch.bat execution before the fix:\r\n![image](https://cloud.githubusercontent.com/assets/8226325/21173345/93ff6544-c1d7-11e6-8e6c-07f6bc62c384.png)\r\n\r\n######Result of the elasticsearch-service.bat execution before the fix:\r\n![image](https://cloud.githubusercontent.com/assets/8226325/21173471/2b469ba2-c1d8-11e6-898a-05017e5c4ba7.png)\r\n\r\n\r\nTo fix the problem I had to do the following changes:\r\n* In elastisearch.bat\r\n * Line 82\r\n * From: %JAVA% %ES_JAVA_OPTS% %ES_PARAMS% -cp \"%ES_CLASSPATH%\" \"org.elasticsearch.bootstrap.Elasticsearch\" !newparams!\r\n * To: \"%JAVA%\" %ES_JAVA_OPTS% %ES_PARAMS% -cp \"%ES_CLASSPATH%\" \"org.elasticsearch.bootstrap.Elasticsearch\" !newparams!\r\n* In elasticsearch.in.bat\r\n * Line 4\r\n * From: set JAVA=\"%JAVA_HOME%\"\\bin\\java.exe\r\n * To: set JAVA=%JAVA_HOME%\\bin\\java.exe\r\n * Line 8\r\n * From: IF NOT EXIST %JAVA% (\r\n * To: IF NOT EXIST \"%JAVA%\" (\r\n* In Elasticsearch-service.bat\r\n * Line 32\r\n * From: SET JAVA=\"%JAVA_HOME%\"\\bin\\java.exe\r\n * To: SET JAVA=%JAVA_HOME%\\bin\\java.exe\r\n * Line 36\r\n * From: IF NOT EXIST %JAVA% (\r\n * To: IF NOT EXIST \"%JAVA%\" (\r\n * Line 55\r\n * From: %JAVA% -Xmx50M -version > nul 2>&1\r\n * To: \"%JAVA%\" -Xmx50M -version > nul 2>&1\r\n * Line 62\r\n * From: %JAVA% -Xmx50M -version 2>&1 | \"%windir%\\System32\\find\" \"64-Bit\" >nul:\r\n * To: \"%JAVA%\" -Xmx50M -version 2>&1 | \"%windir%\\System32\\find\" \"64-Bit\" >nul:\r\n 
* Line 149\r\n * From: if exist \"%JAVA_HOME%\"\\jre\\bin\\server\\jvm.dll (\r\n * To: if exist \"%JAVA_HOME%\\jre\\bin\\server\\jvm.dll\" (\r\n * Line 155\r\n * From: if exist \"%JAVA_HOME%\"\\bin\\server\\jvm.dll (\r\n * To: if exist \"%JAVA_HOME%\\bin\\server\\jvm.dll\" (\r\n * Line 161\r\n * From: if exist \"%JAVA_HOME%\"\\bin\\client\\jvm.dll (\r\n * To: if exist \"%JAVA_HOME%\\bin\\client\\jvm.dll\" (\r\n \r\n\r\nHere are the files with the fixes:\r\n\r\n[elasticsearch-service.bat.txt](https://github.com/elastic/elasticsearch/files/650998/elasticsearch-service.bat.txt)\r\n[elasticsearch.bat.txt](https://github.com/elastic/elasticsearch/files/650999/elasticsearch.bat.txt)\r\n[elasticsearch.in.bat.txt](https://github.com/elastic/elasticsearch/files/650997/elasticsearch.in.bat.txt)\r\n\r\nHope it helps.", "created_at": "2016-12-14T07:46:17Z" }, { "body": "@yannlebel If you remove the trailing backslash from `JAVA_HOME`, what is packaged today works fine. There is an open PR (#22132) to address this.", "created_at": "2016-12-14T11:48:35Z" }, { "body": "Sorry my mistake I didn't read the changes in the commit correctly and thought this was the PR and #22132 was an issue. I tried without the trailing slash and indead it is working.", "created_at": "2016-12-14T12:05:35Z" } ], "number": 21921, "title": "Fix handling of spaces in Windows paths" }
{ "body": "A previous fix for the handling of paths on Windows related to paths\r\ncontaining multiple spaces introduced a issue where if JAVA_HOME ends\r\nwith a backslash, then Elasticsearch will refuse to start. This is not a\r\ncritical bug as a workaround exists (remove the trailing backslash), but\r\nshould be fixed nevertheless. This commit addresses this situation while\r\nnot regressing the previous fix.\r\n\r\nRelates #21921\r\n", "number": 22132, "review_comments": [], "title": "Another fix for handling of paths on Windows" }
{ "commits": [ { "message": "Another fix for handling of paths on Windows\n\nA previous fix for the handling of paths on Windows related to paths\ncontaining multiple spaces introduced a issue where if JAVA_HOME ends\nwith a backslash, then Elasticsearch will refuse to start. This is not a\ncritical bug as a workaround exists (remove the trailing backslash), but\nshould be fixed nevertheless. This commit addresses this situation while\nnot regressing the previous fix." } ], "files": [ { "diff": "@@ -4,7 +4,7 @@ SETLOCAL enabledelayedexpansion\n TITLE Elasticsearch Service ${project.version}\n \n IF DEFINED JAVA_HOME (\n- SET JAVA=\"%JAVA_HOME%\"\\bin\\java.exe\n+ SET JAVA=\"%JAVA_HOME%\\bin\\java.exe\"\n ) ELSE (\n FOR %%I IN (java.exe) DO set JAVA=%%~$PATH:I\n )\n@@ -121,19 +121,19 @@ echo Installing service : \"%SERVICE_ID%\"\n echo Using JAVA_HOME (%ARCH%): \"%JAVA_HOME%\"\n \n rem Check JVM server dll first\n-if exist \"%JAVA_HOME%\"\\jre\\bin\\server\\jvm.dll (\n+if exist \"%JAVA_HOME%\\jre\\bin\\server\\jvm.dll\" (\n \tset JVM_DLL=\\jre\\bin\\server\\jvm.dll\n \tgoto foundJVM\n )\n \n rem Check 'server' JRE (JRE installed on Windows Server)\n-if exist \"%JAVA_HOME%\"\\bin\\server\\jvm.dll (\n+if exist \"%JAVA_HOME%\\bin\\server\\jvm.dll\" (\n \tset JVM_DLL=\\bin\\server\\jvm.dll\n \tgoto foundJVM\n )\n \n rem Fallback to 'client' JRE\n-if exist \"%JAVA_HOME%\"\\bin\\client\\jvm.dll (\n+if exist \"%JAVA_HOME%\\bin\\client\\jvm.dll\" (\n \tset JVM_DLL=\\bin\\client\\jvm.dll\n \techo Warning: JAVA_HOME points to a JRE and not JDK installation; a client (not a server^) JVM will be used...\n ) else (", "filename": "distribution/src/main/resources/bin/elasticsearch-service.bat", "status": "modified" }, { "diff": "@@ -1,7 +1,7 @@\n @echo off\n \n IF DEFINED JAVA_HOME (\n- set JAVA=\"%JAVA_HOME%\"\\bin\\java.exe\n+ set JAVA=\"%JAVA_HOME%\\bin\\java.exe\"\n ) ELSE (\n FOR %%I IN (java.exe) DO set JAVA=%%~$PATH:I\n )", "filename": "distribution/src/main/resources/bin/elasticsearch.in.bat", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: 5.1.1\r\n**Description of the problem including expected versus actual behavior**:\r\n\r\nI'm installing a plugin like x-pack but when x-pack ask me for some authorization, I just `CTRL+C` to stop the process.\r\nThe plugin manager does not clean the temporary directory apparently so when I ask for the plugin list, I'm getting a temporary dir as a plugin name.\r\n\r\n**Steps to reproduce**:\r\n 1. `wget https://artifacts.elastic.co/downloads/packs/x-pack/x-pack-5.1.1.zip`\r\n 2. `bin/elasticsearch-plugin install file:///path/to/x-pack-5.1.1.zip`\r\n 3. When it prompts `Continue with installation? [y/N]`, press `CTRL+C`\r\n 4. `bin/elasticsearch-plugin list`\r\n\r\nIt gives:\r\n\r\n```sh\r\n.installing-2096181796179285823\r\n```\r\n\r\nWe should either tell the user that some pending job is still in progress or ignore pending files or to clean up things when the install process is stopped before it's done.\r\n", "comments": [], "number": 22111, "title": "PluginManager list reports pending installation as a plugin" }
{ "body": "This commit enables CLI commands to be closeable and installs a runtime\r\nshutdown hook to ensure that if the JVM shuts down (as opposed to\r\naborting) the close method is called.\r\n\r\nIt is not enough to wrap uses of commands in main methods in\r\ntry-with-resources blocks as these will not run if, say, the virtual\r\nmachine is terminated in response to SIGINT, or system shutdown event.\r\n\r\nCloses #22111", "number": 22126, "review_comments": [ { "body": "How will this react in tests? We would end up creating a bunch of hooks for temp files?", "created_at": "2016-12-12T19:11:59Z" }, { "body": "Should we still log something here, in case `terminal.println` throws an IOException? Or the impossible becomes possible (hey, you never know with JDK9)", "created_at": "2016-12-12T19:45:35Z" }, { "body": "And by \"log\", I guess worst case scenario is `System.out.println(\"The impossible happened: \" + ...);`", "created_at": "2016-12-12T19:47:44Z" }, { "body": "I'm wondering if we should have the hook be added by the main method for Command, so that we can test. For example, have the Command be Closeable, and in close rm the files we have stashed?", "created_at": "2016-12-12T19:51:05Z" }, { "body": "`Terminal#println` does not declare a checked `IOException`. The only method throwing a checked `IOException` from that body is the implicit `StringWriter#close` from the try-with-resources.", "created_at": "2016-12-12T19:52:36Z" }, { "body": "I think this satisfies the TODO below, which can be removed?", "created_at": "2016-12-12T19:53:33Z" }, { "body": "I like the suggestion; I pushed 1b474fd47f9655f015787d8a1c964508a5da9c95. Let me know what you think?", "created_at": "2016-12-12T21:21:39Z" }, { "body": "I removed it when I pushed 1b474fd47f9655f015787d8a1c964508a5da9c95.", "created_at": "2016-12-12T21:21:47Z" }, { "body": "Right, but even though the Javadocs for StringWriter say that an exception here is impossible, that doesn't prohibit a non-breaking (because it already declares that it throws it!) change in JDK 10+ where it could throw an exception? I just wanted to err on the side of paranoia and non-swallowing exceptions", "created_at": "2016-12-12T21:42:01Z" }, { "body": "That's fair. Since it's a violation of our assumptions no matter how we get there, I opted to throw an `AssertionError`. I pushed fed6da3703d26f83a080961823eaa5d68dc0c6ff.", "created_at": "2016-12-12T22:16:20Z" }, { "body": "`rm` takes multiple paths, why not use that? It handles exceptions from any of them.", "created_at": "2016-12-13T23:34:08Z" }, { "body": "I pushed 00fb1c656bad6a69f7a322589358c11575f41fbb.", "created_at": "2016-12-14T00:03:00Z" } ], "title": "Add shutdown hook for closing CLI commands" }
{ "commits": [ { "message": "Ensure temp files are removed on plugin install\n\nThis commit ensures that temp files created during plugin install are\ncleaned up if the JVM terminates normally.\n\nOne case where these files can be left around is if the user aborts the\ninstall when confirmation to grant permissions is displayed to the user." }, { "message": "Add shutdown hook for closing CLI commands\n\nThis commit enables CLI commands to be closeable and installs a runtime\nshutdown hook to ensure that if the JVM shuts down (as opposed to\naborting) the close method is called.\n\nIt is not enough to wrap uses of commands in main methods in\ntry-with-resources blocks as these will not run if, say, the virtual\nmachine is terminated in response to SIGINT, or system shutdown event." }, { "message": "Throw assertion error on surprise IOException\n\nWe are forced to have a catch block because closing a StringWriter\ndeclares a checked IOException, but the Javadocs say this is\nimpossible. If an exception is actually thrown here, it is a violations\nof our assumptions of what is possible and should be uncovered." }, { "message": "Fix typo in comment in o/e/c/Command.java\n\nThis fixes a silly typo in o/e/c/Command.java." }, { "message": "Strengthen command shutdown hook output assertions\n\nIf no exception is thrown by close, there should not be any output. This\ncommit adds an assertion for that." }, { "message": "Mark command shutdown hook thread field as final\n\nThis field can and should be final, so we mark it as such." }, { "message": "Add hook to skip shutdown hook in command tests\n\nThis commit adds a hook for skipping the shutdown hook in command tests,\notherwise we install a bunch of unneeded hooks during shutdown tests,\nand need special permissions to do so anyway." }, { "message": "Simplify InstallPluginCommand#close\n\nIOUtils#rm already does the hard work of handling exceptions for us, and\nit can handle deleting multiple paths at the same time. This commit\nsimplifies InstallPluginCommand#close in this spirit." } ], "files": [ { "diff": "@@ -24,17 +24,21 @@\n import joptsimple.OptionSet;\n import joptsimple.OptionSpec;\n import org.apache.logging.log4j.Level;\n+import org.apache.lucene.util.SetOnce;\n import org.elasticsearch.common.SuppressForbidden;\n import org.elasticsearch.common.logging.LogConfigurator;\n import org.elasticsearch.common.settings.Settings;\n \n+import java.io.Closeable;\n import java.io.IOException;\n+import java.io.PrintWriter;\n+import java.io.StringWriter;\n import java.util.Arrays;\n \n /**\n * An action to execute within a cli.\n */\n-public abstract class Command {\n+public abstract class Command implements Closeable {\n \n /** A description of the command, used in the help output. 
*/\n protected final String description;\n@@ -44,15 +48,37 @@ public abstract class Command {\n \n private final OptionSpec<Void> helpOption = parser.acceptsAll(Arrays.asList(\"h\", \"help\"), \"show help\").forHelp();\n private final OptionSpec<Void> silentOption = parser.acceptsAll(Arrays.asList(\"s\", \"silent\"), \"show minimal output\");\n- private final OptionSpec<Void> verboseOption = parser.acceptsAll(Arrays.asList(\"v\", \"verbose\"), \"show verbose output\")\n- .availableUnless(silentOption);\n+ private final OptionSpec<Void> verboseOption =\n+ parser.acceptsAll(Arrays.asList(\"v\", \"verbose\"), \"show verbose output\").availableUnless(silentOption);\n \n public Command(String description) {\n this.description = description;\n }\n \n+ final SetOnce<Thread> shutdownHookThread = new SetOnce<>();\n+\n /** Parses options for this command from args and executes it. */\n public final int main(String[] args, Terminal terminal) throws Exception {\n+ if (addShutdownHook()) {\n+ shutdownHookThread.set(new Thread(() -> {\n+ try {\n+ this.close();\n+ } catch (final IOException e) {\n+ try (\n+ final StringWriter sw = new StringWriter();\n+ final PrintWriter pw = new PrintWriter(sw)) {\n+ e.printStackTrace(pw);\n+ terminal.println(sw.toString());\n+ } catch (final IOException impossible) {\n+ // StringWriter#close declares a checked IOException from the Closeable interface but the Javadocs for StringWriter\n+ // say that an exception here is impossible\n+ throw new AssertionError(impossible);\n+ }\n+ }\n+ }));\n+ Runtime.getRuntime().addShutdownHook(shutdownHookThread.get());\n+ }\n+\n // initialize default for es.logger.level because we will not read the log4j2.properties\n final String loggerLevel = System.getProperty(\"es.logger.level\", Level.INFO.name());\n final Settings settings = Settings.builder().put(\"logger.level\", loggerLevel).build();\n@@ -118,4 +144,19 @@ protected static void exit(int status) {\n * Any runtime user errors (like an input file that does not exist), should throw a {@link UserException}. */\n protected abstract void execute(Terminal terminal, OptionSet options) throws Exception;\n \n+ /**\n+ * Return whether or not to install the shutdown hook to cleanup resources on exit. This method should only be overridden in test\n+ * classes.\n+ *\n+ * @return whether or not to install the shutdown hook\n+ */\n+ protected boolean addShutdownHook() {\n+ return true;\n+ }\n+\n+ @Override\n+ public void close() throws IOException {\n+\n+ }\n+\n }", "filename": "core/src/main/java/org/elasticsearch/cli/Command.java", "status": "modified" }, { "diff": "@@ -320,7 +320,7 @@ public void onProgress(int percent) {\n /** Downloads a zip from the url, as well as a SHA1 checksum, and checks the checksum. 
*/\n private Path downloadZipAndChecksum(Terminal terminal, String urlString, Path tmpDir) throws Exception {\n Path zip = downloadZip(terminal, urlString, tmpDir);\n-\n+ pathsToDeleteOnShutdown.add(zip);\n URL checksumUrl = new URL(urlString + \".sha1\");\n final String expectedChecksum;\n try (InputStream in = checksumUrl.openStream()) {\n@@ -344,9 +344,9 @@ private Path unzip(Path zip, Path pluginsDir) throws IOException, UserException\n // unzip plugin to a staging temp dir\n \n final Path target = stagingDirectory(pluginsDir);\n+ pathsToDeleteOnShutdown.add(target);\n \n boolean hasEsDir = false;\n- // TODO: we should wrap this in a try/catch and try deleting the target dir on failure?\n try (ZipInputStream zipInput = new ZipInputStream(Files.newInputStream(zip))) {\n ZipEntry entry;\n byte[] buffer = new byte[8192];\n@@ -605,4 +605,12 @@ private static void setFileAttributes(final Path path, final Set<PosixFilePermis\n Files.setPosixFilePermissions(path, permissions);\n }\n }\n+\n+ private final List<Path> pathsToDeleteOnShutdown = new ArrayList<>();\n+\n+ @Override\n+ public void close() throws IOException {\n+ IOUtils.rm(pathsToDeleteOnShutdown.toArray(new Path[pathsToDeleteOnShutdown.size()]));\n+ }\n+\n }", "filename": "core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java", "status": "modified" }, { "diff": "@@ -19,27 +19,37 @@\n \n package org.elasticsearch.plugins;\n \n+import org.apache.lucene.util.IOUtils;\n+import org.elasticsearch.cli.Command;\n import org.elasticsearch.cli.MultiCommand;\n import org.elasticsearch.cli.Terminal;\n-import org.elasticsearch.common.logging.LogConfigurator;\n-import org.elasticsearch.common.settings.Settings;\n-import org.elasticsearch.env.Environment;\n-import org.elasticsearch.node.internal.InternalSettingsPreparer;\n+\n+import java.io.IOException;\n+import java.util.Collection;\n+import java.util.Collections;\n \n /**\n * A cli tool for adding, removing and listing plugins for elasticsearch.\n */\n public class PluginCli extends MultiCommand {\n \n+ private final Collection<Command> commands;\n+\n private PluginCli() {\n super(\"A tool for managing installed elasticsearch plugins\");\n subcommands.put(\"list\", new ListPluginsCommand());\n subcommands.put(\"install\", new InstallPluginCommand());\n subcommands.put(\"remove\", new RemovePluginCommand());\n+ commands = Collections.unmodifiableCollection(subcommands.values());\n }\n \n public static void main(String[] args) throws Exception {\n exit(new PluginCli().main(args, Terminal.DEFAULT));\n }\n \n+ @Override\n+ public void close() throws IOException {\n+ IOUtils.close(commands);\n+ }\n+\n }", "filename": "core/src/main/java/org/elasticsearch/plugins/PluginCli.java", "status": "modified" }, { "diff": "@@ -44,7 +44,7 @@\n /**\n * A command for the plugin cli to remove a plugin from elasticsearch.\n */\n-final class RemovePluginCommand extends SettingCommand {\n+class RemovePluginCommand extends SettingCommand {\n \n private final OptionSpec<String> arguments;\n ", "filename": "core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java", "status": "modified" }, { "diff": "@@ -26,41 +26,67 @@\n public class CommandTests extends ESTestCase {\n \n static class UserErrorCommand extends Command {\n+\n UserErrorCommand() {\n super(\"Throws a user error\");\n }\n+\n @Override\n protected void execute(Terminal terminal, OptionSet options) throws Exception {\n throw new UserException(ExitCodes.DATA_ERROR, \"Bad input\");\n }\n+\n+ @Override\n+ protected boolean 
addShutdownHook() {\n+ return false;\n+ }\n+\n }\n \n static class UsageErrorCommand extends Command {\n+\n UsageErrorCommand() {\n super(\"Throws a usage error\");\n }\n+\n @Override\n protected void execute(Terminal terminal, OptionSet options) throws Exception {\n throw new UserException(ExitCodes.USAGE, \"something was no good\");\n }\n+\n+ @Override\n+ protected boolean addShutdownHook() {\n+ return false;\n+ }\n+\n }\n \n static class NoopCommand extends Command {\n+\n boolean executed = false;\n+\n NoopCommand() {\n super(\"Does nothing\");\n }\n+\n @Override\n protected void execute(Terminal terminal, OptionSet options) throws Exception {\n terminal.println(\"Normal output\");\n terminal.println(Terminal.Verbosity.SILENT, \"Silent output\");\n terminal.println(Terminal.Verbosity.VERBOSE, \"Verbose output\");\n executed = true;\n }\n+\n @Override\n protected void printAdditionalHelp(Terminal terminal) {\n terminal.println(\"Some extra help\");\n }\n+\n+ @Override\n+ protected boolean addShutdownHook() {\n+ return false;\n+ }\n+\n }\n \n public void testHelp() throws Exception {\n@@ -92,7 +118,7 @@ public void testVerbositySilentAndVerbose() throws Exception {\n command.mainWithoutErrorHandling(args, terminal);\n });\n assertTrue(e.getMessage(),\n- e.getMessage().contains(\"Option(s) [v/verbose] are unavailable given other options on the command line\"));\n+ e.getMessage().contains(\"Option(s) [v/verbose] are unavailable given other options on the command line\"));\n }\n \n public void testSilentVerbosity() throws Exception {\n@@ -143,4 +169,5 @@ public void testUsageError() throws Exception {\n assertTrue(output, output.contains(\"Throws a usage error\"));\n assertTrue(output, output.contains(\"ERROR: something was no good\"));\n }\n+\n }", "filename": "core/src/test/java/org/elasticsearch/cli/CommandTests.java", "status": "modified" }, { "diff": "@@ -0,0 +1,69 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.cli;\n+\n+import joptsimple.OptionSet;\n+import org.elasticsearch.test.ESTestCase;\n+\n+import java.io.IOException;\n+import java.util.concurrent.atomic.AtomicBoolean;\n+\n+import static org.hamcrest.CoreMatchers.containsString;\n+import static org.hamcrest.Matchers.isEmptyString;\n+\n+public class EvilCommandTests extends ESTestCase {\n+\n+ public void testCommandShutdownHook() throws Exception {\n+ final AtomicBoolean closed = new AtomicBoolean();\n+ final boolean shouldThrow = randomBoolean();\n+ final Command command = new Command(\"test-command-shutdown-hook\") {\n+ @Override\n+ protected void execute(Terminal terminal, OptionSet options) throws Exception {\n+\n+ }\n+\n+ @Override\n+ public void close() throws IOException {\n+ closed.set(true);\n+ if (shouldThrow) {\n+ throw new IOException(\"fail\");\n+ }\n+ }\n+ };\n+ final MockTerminal terminal = new MockTerminal();\n+ command.main(new String[0], terminal);\n+ assertNotNull(command.shutdownHookThread.get());\n+ // successful removal here asserts that the runtime hook was installed in Command#main\n+ assertTrue(Runtime.getRuntime().removeShutdownHook(command.shutdownHookThread.get()));\n+ command.shutdownHookThread.get().run();\n+ command.shutdownHookThread.get().join();\n+ assertTrue(closed.get());\n+ final String output = terminal.getOutput();\n+ if (shouldThrow) {\n+ // ensure that we dump the exception\n+ assertThat(output, containsString(\"java.io.IOException: fail\"));\n+ // ensure that we dump the stack trace too\n+ assertThat(output, containsString(\"\\tat org.elasticsearch.cli.EvilCommandTests$1.close\"));\n+ } else {\n+ assertThat(output, isEmptyString());\n+ }\n+ }\n+\n+}", "filename": "qa/evil-tests/src/test/java/org/elasticsearch/cli/EvilCommandTests.java", "status": "added" }, { "diff": "@@ -603,7 +603,12 @@ public void testZipRelativeOutsideEntryName() throws Exception {\n \n public void testOfficialPluginsHelpSorted() throws Exception {\n MockTerminal terminal = new MockTerminal();\n- new InstallPluginCommand().main(new String[] { \"--help\" }, terminal);\n+ new InstallPluginCommand() {\n+ @Override\n+ protected boolean addShutdownHook() {\n+ return false;\n+ }\n+ }.main(new String[] { \"--help\" }, terminal);\n try (BufferedReader reader = new BufferedReader(new StringReader(terminal.getOutput()))) {\n String line = reader.readLine();\n \n@@ -625,7 +630,12 @@ public void testOfficialPluginsHelpSorted() throws Exception {\n \n public void testOfficialPluginsIncludesXpack() throws Exception {\n MockTerminal terminal = new MockTerminal();\n- new InstallPluginCommand().main(new String[] { \"--help\" }, terminal);\n+ new InstallPluginCommand() {\n+ @Override\n+ protected boolean addShutdownHook() {\n+ return false;\n+ }\n+ }.main(new String[] { \"--help\" }, terminal);\n assertTrue(terminal.getOutput(), terminal.getOutput().contains(\"x-pack\"));\n }\n ", "filename": "qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java", "status": "modified" }, { "diff": "@@ -59,21 +59,26 @@ public void setUp() throws Exception {\n static MockTerminal listPlugins(Path home) throws Exception {\n return listPlugins(home, new String[0]);\n }\n- \n+\n static MockTerminal listPlugins(Path home, String[] args) throws Exception {\n String[] argsAndHome = new String[args.length + 1];\n System.arraycopy(args, 0, argsAndHome, 0, args.length);\n argsAndHome[args.length] = 
\"-Epath.home=\" + home;\n MockTerminal terminal = new MockTerminal();\n- int status = new ListPluginsCommand().main(argsAndHome, terminal);\n+ int status = new ListPluginsCommand() {\n+ @Override\n+ protected boolean addShutdownHook() {\n+ return false;\n+ }\n+ }.main(argsAndHome, terminal);\n assertEquals(ExitCodes.OK, status);\n return terminal;\n }\n- \n+\n static String buildMultiline(String... args){\n return Arrays.asList(args).stream().collect(Collectors.joining(\"\\n\", \"\", \"\\n\"));\n }\n- \n+\n static void buildFakePlugin(Environment env, String description, String name, String classname) throws IOException {\n PluginTestUtil.writeProperties(env.pluginsFile().resolve(name),\n \"description\", description,\n@@ -108,15 +113,15 @@ public void testTwoPlugins() throws Exception {\n MockTerminal terminal = listPlugins(home);\n assertEquals(terminal.getOutput(), buildMultiline(\"fake1\", \"fake2\"));\n }\n- \n+\n public void testPluginWithVerbose() throws Exception {\n buildFakePlugin(env, \"fake desc\", \"fake_plugin\", \"org.fake\");\n String[] params = { \"-v\" };\n MockTerminal terminal = listPlugins(home, params);\n assertEquals(terminal.getOutput(), buildMultiline(\"Plugins directory: \" + env.pluginsFile(), \"fake_plugin\",\n \"- Plugin information:\", \"Name: fake_plugin\", \"Description: fake desc\", \"Version: 1.0\", \" * Classname: org.fake\"));\n }\n- \n+\n public void testPluginWithVerboseMultiplePlugins() throws Exception {\n buildFakePlugin(env, \"fake desc 1\", \"fake_plugin1\", \"org.fake\");\n buildFakePlugin(env, \"fake desc 2\", \"fake_plugin2\", \"org.fake2\");\n@@ -127,27 +132,27 @@ public void testPluginWithVerboseMultiplePlugins() throws Exception {\n \" * Classname: org.fake\", \"fake_plugin2\", \"- Plugin information:\", \"Name: fake_plugin2\",\n \"Description: fake desc 2\", \"Version: 1.0\", \" * Classname: org.fake2\"));\n }\n- \n+\n public void testPluginWithoutVerboseMultiplePlugins() throws Exception {\n buildFakePlugin(env, \"fake desc 1\", \"fake_plugin1\", \"org.fake\");\n buildFakePlugin(env, \"fake desc 2\", \"fake_plugin2\", \"org.fake2\");\n MockTerminal terminal = listPlugins(home, new String[0]);\n String output = terminal.getOutput();\n assertEquals(output, buildMultiline(\"fake_plugin1\", \"fake_plugin2\"));\n }\n- \n+\n public void testPluginWithoutDescriptorFile() throws Exception{\n Files.createDirectories(env.pluginsFile().resolve(\"fake1\"));\n NoSuchFileException e = expectThrows(NoSuchFileException.class, () -> listPlugins(home));\n assertEquals(e.getFile(), env.pluginsFile().resolve(\"fake1\").resolve(PluginInfo.ES_PLUGIN_PROPERTIES).toString());\n }\n- \n+\n public void testPluginWithWrongDescriptorFile() throws Exception{\n PluginTestUtil.writeProperties(env.pluginsFile().resolve(\"fake1\"),\n \"description\", \"fake desc\");\n IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> listPlugins(home));\n assertEquals(e.getMessage(), \"Property [name] is missing in [\" +\n env.pluginsFile().resolve(\"fake1\").resolve(PluginInfo.ES_PLUGIN_PROPERTIES).toString() + \"]\");\n }\n- \n+\n }", "filename": "qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java", "status": "modified" }, { "diff": "@@ -140,7 +140,12 @@ public void testRemoveUninstalledPluginErrors() throws Exception {\n assertEquals(\"plugin fake not found; run 'elasticsearch-plugin list' to get list of installed plugins\", e.getMessage());\n \n MockTerminal terminal = new MockTerminal();\n- new 
RemovePluginCommand().main(new String[] { \"-Epath.home=\" + home, \"fake\" }, terminal);\n+ new RemovePluginCommand() {\n+ @Override\n+ protected boolean addShutdownHook() {\n+ return false;\n+ }\n+ }.main(new String[] { \"-Epath.home=\" + home, \"fake\" }, terminal);\n try (BufferedReader reader = new BufferedReader(new StringReader(terminal.getOutput()))) {\n assertEquals(\"-> Removing fake...\", reader.readLine());\n assertEquals(\"ERROR: plugin fake not found; run 'elasticsearch-plugin list' to get list of installed plugins\",", "filename": "qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java", "status": "modified" }, { "diff": "@@ -50,6 +50,11 @@ void init(final boolean daemonize, final Path pidFile, final boolean quiet, fina\n init.set(true);\n initConsumer.accept(!daemonize, pidFile, quiet, esSettings);\n }\n+\n+ @Override\n+ protected boolean addShutdownHook() {\n+ return false;\n+ }\n }, terminal);\n assertThat(status, equalTo(expectedStatus));\n assertThat(init.get(), equalTo(expectedInit));", "filename": "test/framework/src/main/java/org/elasticsearch/bootstrap/ESElasticsearchCliTestCase.java", "status": "modified" } ] }
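The review discussion above also covers how such a hook can be tested without waiting for the JVM to exit. A hedged sketch of the technique: `Runtime#removeShutdownHook` returns `true` only if the thread was previously registered, so a test can assert registration, prevent the hook from firing at exit, and then run it synchronously.

```java
import java.util.concurrent.atomic.AtomicBoolean;

public final class ShutdownHookTestSketch {

    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean closed = new AtomicBoolean(false);
        final Thread hook = new Thread(() -> closed.set(true));

        Runtime.getRuntime().addShutdownHook(hook);

        // removeShutdownHook returns true only if the hook had been registered, which
        // both verifies the registration and keeps it from running at JVM exit.
        final boolean wasRegistered = Runtime.getRuntime().removeShutdownHook(hook);

        // Now the cleanup logic can be exercised deterministically inside the test.
        hook.run();
        hook.join();

        System.out.println("registered=" + wasRegistered + ", closed=" + closed.get());
    }
}
```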
{ "body": "Beginning from 5.0.0-beta1 SearchSourceBuilder adds an \"ext\" field. This way count queries can not work with queries generated using SearchSourceBuilder as ext is not supported. \n\nEmpty query by just creating new SearchSourceBuilder looks now like this:\n\n{\n \"ext\": {}\n}\n\n_search will return results, _count will return \n\n{\n \"error\": {\n \"root_cause\": [\n {\n \"type\": \"parsing_exception\",\n \"reason\": \"request does not support [ext]\",\n \"line\": 2,\n \"col\": 3\n }\n ],\n \"type\": \"parsing_exception\",\n \"reason\": \"request does not support [ext]\",\n \"line\": 2,\n \"col\": 3\n },\n \"status\": 400\n}\n", "comments": [ { "body": "@javanna could you take a look at this please?\n", "created_at": "2016-10-17T12:44:03Z" }, { "body": "hi @arosenheinrich , thanks for opening this issue, sorry it took me a while to have a look at it. \r\n\r\nI see that we should probably not print out an empty `ext` object all the time, which I am going to address. That said I think you see this because you are sending via REST what gets generated through `SearchSourceBuilder`, is that right? If you use the Java API, you are one step ahead compared to other clients, parsing doesn't happen as you are already providing the parsed objects that will be sent around using the transport layer, hence it doesn't matter what the json representation of what you send is. That is why I don't expect this to be a problem in the Java API, but only in case you use SearchSourceBuilder to generate json that you then sent via REST layer. I'd appreciate if you can confirm this.", "created_at": "2016-12-09T21:35:20Z" }, { "body": "Indeed we are using the REST API here. Thanks for fixing it!", "created_at": "2016-12-12T12:57:03Z" } ], "number": 20969, "title": "SearchSourceBuilder not working for count" }
{ "body": "We shouldn't output an empty ext object if no ext sections have been set to the SearchSourceBuilder.\r\n\r\nCloses #20969", "number": 22093, "review_comments": [], "title": "Don't output empty ext object in SearchSourceBuilder#toXContent" }
{ "commits": [ { "message": "Don't output empty ext object in SearchSourceBuilder#toXContent\n\nWe shouldn't output an empty ext object if no ext sections have been set to the SearchSourceBuilder.\n\nCloses #20969" } ], "files": [ { "diff": "@@ -1228,7 +1228,7 @@ public void innerToXContent(XContentBuilder builder, Params params) throws IOExc\n builder.field(STATS_FIELD.getPreferredName(), stats);\n }\n \n- if (extBuilders != null) {\n+ if (extBuilders != null && extBuilders.isEmpty() == false) {\n builder.startObject(EXT_FIELD.getPreferredName());\n for (SearchExtBuilder extBuilder : extBuilders) {\n extBuilder.toXContent(builder, params);", "filename": "core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java", "status": "modified" }, { "diff": "@@ -31,10 +31,12 @@\n import org.elasticsearch.common.xcontent.ToXContent;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n+import org.elasticsearch.common.xcontent.XContentHelper;\n import org.elasticsearch.common.xcontent.XContentParser;\n import org.elasticsearch.common.xcontent.XContentType;\n import org.elasticsearch.index.query.QueryBuilders;\n import org.elasticsearch.index.query.QueryParseContext;\n+import org.elasticsearch.index.query.RandomQueryBuilder;\n import org.elasticsearch.search.AbstractSearchTestCase;\n import org.elasticsearch.search.rescore.QueryRescorerBuilder;\n import org.elasticsearch.search.sort.FieldSortBuilder;\n@@ -44,6 +46,7 @@\n import org.elasticsearch.test.EqualsHashCodeTestUtils;\n \n import java.io.IOException;\n+import java.util.Map;\n \n import static org.hamcrest.CoreMatchers.containsString;\n import static org.hamcrest.Matchers.equalTo;\n@@ -290,6 +293,29 @@ public void testTimeoutWithoutUnits() throws IOException {\n }\n }\n \n+ public void testToXContent() throws IOException {\n+ //verify that only what is set gets printed out through toXContent\n+ XContentType xContentType = randomFrom(XContentType.values());\n+ {\n+ SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();\n+ XContentBuilder builder = XContentFactory.contentBuilder(xContentType);\n+ searchSourceBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS);\n+ BytesReference source = builder.bytes();\n+ Map<String, Object> sourceAsMap = XContentHelper.convertToMap(source, false).v2();\n+ assertEquals(0, sourceAsMap.size());\n+ }\n+ {\n+ SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();\n+ searchSourceBuilder.query(RandomQueryBuilder.createQuery(random()));\n+ XContentBuilder builder = XContentFactory.contentBuilder(xContentType);\n+ searchSourceBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS);\n+ BytesReference source = builder.bytes();\n+ Map<String, Object> sourceAsMap = XContentHelper.convertToMap(source, false).v2();\n+ assertEquals(1, sourceAsMap.size());\n+ assertEquals(\"query\", sourceAsMap.keySet().iterator().next());\n+ }\n+ }\n+\n public void testEmptyPostFilter() throws IOException {\n SearchSourceBuilder builder = new SearchSourceBuilder();\n String query = \"{ \\\"post_filter\\\": {} }\";", "filename": "core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java", "status": "modified" } ] }
{ "body": "When a key is present in json object multiple times it doesn't raise a parse error and only last value is used. This should instead raise `json_parse_exception`.\n\n**Elasticsearch version**: verified on 2.x, 5.0.0-alpha3\n\n**Steps to reproduce**:\n1. `curl -X PUT localhost:9200/i -d '{\"settings\": {\"number_of_replicas\": 2}, \"settings\": {\"number_of_shards\": 1}}'`\n2. `curl -X GET localhost:9200/i`\n", "comments": [ { "body": "I could see this becoming a long discussion around whether that one is invalid json or not and whether we should return a parse exception or some other error. The json library we use for parsing allows this, then we should improve this on our end rather than being lenient.\n\nThis reminds me of #19547 too and is a very common problem with the way we pull parse json. It can easily be solved case by case but every single parser in our codebase is subject to this so it would be nice to have some generic solution for it. Not sure if there are alternatives to adding lots of ifs to all our pull parsers, we should evaluate that.\n", "created_at": "2016-07-27T09:12:57Z" }, { "body": "> The json library we use for parsing allows this, then we should improve this on our end rather than being lenient.\n\nFor the record, the option is `JsonParser.STRICT_DUPLICATE_DETECTION` and has the following warning:\n\n```\nNote that enabling this feature will incur performance overhead \ndue to having to store and check additional information: \nthis typically adds 20-30% to execution time for basic parsing.\n```\n", "created_at": "2016-07-27T10:00:08Z" }, { "body": "According to the JSON spec, this isn't invalid JSON. The spec doesn't mention how duplicate keys should be treated. Many languages will simply overwrite older values with newer values, without generating any warning. This is essentially what Elasticsearch does today, and i'm not sure it is worth a 20-30% penalty to prevent this behaviour.\n", "created_at": "2016-07-27T18:11:49Z" }, { "body": "Yes, strictly speaking (the rfc only says the keys **SHOULD** be unique), this is valid. I also agree that the performance penalty isn't worth it. It would, however, be nice to document this behavior and perhaps (if it's easy) have an option to turn on strict checking (ideally per request) - it would be useful as debugging tool and perhaps when running tests.\n", "created_at": "2016-07-27T19:19:37Z" }, { "body": "Allowing duplicate keys adds a lot of confusion: https://discuss.elastic.co/t/using-the-remove-processor-for-ingest-node/56500\n\nMaybe for certain apis we should enable strict parsing? (admin like APIs?)\n", "created_at": "2016-07-28T09:56:16Z" }, { "body": "Discussed in FixitFriday: let's play with the jackon feature to reject duplicated keys and make sure that it works and has a reasonable performance hit. If it is not satisfactory, then let's look into whether there are things that we can do at a higher level such as ObjectParser.\n", "created_at": "2016-07-29T09:52:22Z" }, { "body": "### Macrobenchmark Results\r\n\r\nWe have run our whole macrobenchmark suite with `JsonParser.STRICT_DUPLICATE_DETECTION == false` (`baseline`) and `JsonParser.STRICT_DUPLICATE_DETECTION == true` (`STRICT_DUPLICATE_DETECTION`).\r\n\r\nWe see at most a reduction in median indexing throughput of 3% for our macrobenchmark suite (PMC track). 
\r\n\r\n### Microbenchmark Results\r\n\r\nI also double-checked a few scenarios with a microbenchmark and saw similar results (see https://gist.github.com/danielmitterdorfer/9236796a46f3956447171313a6a0b365):\r\n\r\nBelow are the results of both configurations showing the average time for one iteration (smaller is better).\r\n\r\n`JsonParser.Feature.STRICT_DUPLICATE_DETECTION: false`:\r\n\r\n\r\n```\r\nBenchmark Mode Cnt Score Error Units\r\nJsonParserBenchmark.largeJson avgt 60 19.414 ± 0.044 us/op\r\nJsonParserBenchmark.smallJson avgt 60 0.479 ± 0.001 us/op\r\n```\r\n\r\n`JsonParser.Feature.STRICT_DUPLICATE_DETECTION: true`:\r\n\r\n```\r\nBenchmark Mode Cnt Score Error Units\r\nJsonParserBenchmark.largeJson avgt 60 20.642 ± 0.064 us/op\r\nJsonParserBenchmark.smallJson avgt 60 0.487 ± 0.001 us/op\r\n```\r\n\r\nFor smaller JSON objects (49 bytes) the overhead of duplication check is 8ns or 1.6%. For a large JSON object (6440 bytes) the overhead of duplication check is in the range 1.12us [1] and 1.3us [2] or in the range 5.8% and 6.7%.\r\n\r\n[1] best case duplication check enabled 20.578 us, worst case duplication check enabled: 19.458 us\r\n[2] worst case duplication check enabled: 20.706 us, best case duplication check disabled: 19.370 us\r\n\r\nPlease refer to the gist for more details.", "created_at": "2016-12-01T12:46:10Z" }, { "body": "Thanks @danielmitterdorfer. To me that means we should do it. We can have an undocumented escape hatch if we do not feel confident the overhead will be low in all cases.", "created_at": "2016-12-01T13:08:34Z" }, { "body": "> We can have an undocumented escape hatch\r\n\r\n@jpountz The [relevant code](https://github.com/elastic/elasticsearch/blob/27ff4f3/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java#L52-L60) is in a `static` block so we can't use our settings infrastructure. I guess that means we'd use a system property?", "created_at": "2016-12-01T13:45:04Z" }, { "body": "That would work for me. Or we could handle it like `INDICES_MAX_CLAUSE_COUNT_SETTING` I suppose, which is a node setting that sets the static limit on the number of boolean clauses.", "created_at": "2016-12-01T13:51:24Z" }, { "body": "Thanks @danielmitterdorfer. \r\n\r\nI agree with @jpountz, and a first step would be to see if our tests pass (I'm pretty sure we will have to adapt some of them). Also, the same JSON factory is used for both parsing and generating JSON: if we enable this feature then we'll also see if we generate duplicate keys somewhere, which is cool. \r\n", "created_at": "2016-12-05T09:24:58Z" } ], "number": 19614, "title": "Elasticsearch accepts invalid json with unpredictable behavior" }
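The feature discussed throughout this issue is Jackson's `JsonParser.Feature.STRICT_DUPLICATE_DETECTION`. A small, self-contained sketch of what enabling it does (standalone Jackson, not the Elasticsearch wiring):

```java
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.JsonParser;

public final class DuplicateKeyCheck {

    public static void main(String[] args) throws Exception {
        final JsonFactory factory = new JsonFactory();
        // Enabling the feature makes the parser reject JSON objects with repeated keys.
        factory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);

        try (JsonParser parser = factory.createParser("{\"key\": 1, \"key\": 2}")) {
            while (parser.nextToken() != null) {
                // just walking the tokens is enough to trigger the duplicate check
            }
        } catch (JsonParseException e) {
            // reports something along the lines of: Duplicate field 'key'
            System.out.println(e.getOriginalMessage());
        }
    }
}
```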
{ "body": "With this commit we enable the Jackson feature 'STRICT_DUPLICATE_DETECTION'\r\nby default. This ensures that JSON keys are always unique. While this has\r\na performance impact, benchmarking has indicated that the typical drop in\r\nindexing throughput is around 1 - 2%.\r\n\r\nAs a last resort, we allow users to still disable strict duplicate checks\r\nby setting ~`-Des.json.strict_duplicate_detection=false`~\r\n`-Des.xcontent.strict_duplicate_detection=false` which is\r\nintentionally undocumented.\r\n\r\n_Edit: In a later PR (https://github.com/elastic/elasticsearch/pull/28768) the name of this system property has changed from `es.json.strict_duplicate_detection` (valid for Elasticsearch versions from 6.0.0 up to but not including 6.3.0) to `es.xcontent.strict_duplicate_detection` (valid from Elasticsearch 6.3.0 and onwards). Note that this escape hatch only exists for the 6.x series. This PR's description is now updated to reflect that change._\r\n\r\nCloses #19614", "number": 22073, "review_comments": [ { "body": "For those following along at home, this is required because the example data had a duplicate key and @danielmitterdorfer switched it to `min` in the file.", "created_at": "2016-12-09T16:41:48Z" }, { "body": "So this is one problem with doing this as a read-on-startup property. We can no longer test things like this. This test is fairly worthless unless the property is `false` and we have to manually perform the duplication detection.\r\n\r\nI think it'd be worth leaving a comment about that.", "created_at": "2016-12-09T16:44:29Z" }, { "body": "Same here - this test is fairly out of date now.", "created_at": "2016-12-09T16:46:03Z" }, { "body": "Same here.\r\n\r\nI wonder if it makes more sense to leave these tests as they are and skip them if the value of the duplicate check property is `true`. That way if we really want we can set it to false and run the tests again. And if they have a compile time reference to the duplicate check property when we remove the duplicate check property the test will stop compiling and we can use that as a reminder to remove that particular manual duplicate check.", "created_at": "2016-12-09T16:48:01Z" }, { "body": "Same here.", "created_at": "2016-12-09T16:48:14Z" }, { "body": "++", "created_at": "2016-12-09T16:48:28Z" }, { "body": "s/will enforce/enforces/", "created_at": "2016-12-09T16:49:08Z" }, { "body": "Is this a leftover or did you move it or something? It feels like it doesn't belong.", "created_at": "2016-12-09T16:49:52Z" }, { "body": "Same here with regards to the test.", "created_at": "2016-12-09T16:51:03Z" }, { "body": "I'm not adverse to adding it here, I just want to understand it.", "created_at": "2016-12-09T16:51:59Z" }, { "body": "Great idea; I love it. I'll revert the tests to their previous state, add the compile time constant and skip the tests as you've suggested.", "created_at": "2016-12-12T08:33:45Z" }, { "body": "> So this is one problem with doing this as a read-on-startup property.\r\n\r\nI agree that testing is not as simple as it could be but my priority was that the production code is dead-simple (i.e. no fancy abilities to change the JSON factory at runtime after initial creation). 
Given the fact that it's likely that we will enable this check without an ability to opt-out at some point in the future, I felt this was the right trade-off.", "created_at": "2016-12-12T09:02:29Z" }, { "body": "With strict duplicate checks enabled we would throw an error because `size` is specified twice (once by the user, once added internally by our implementation of `TransportPercolateAction`) and this error message gives the user a clear feedback to the user what is wrong instead of just giving a (technically correct but) misleading error to the user about a \"duplicate field size\".", "created_at": "2016-12-12T09:28:28Z" }, { "body": "https://www.youtube.com/watch?v=hou0lU8WMgo", "created_at": "2016-12-13T14:40:36Z" }, { "body": "Is this exception still a problem?", "created_at": "2016-12-13T14:42:21Z" }, { "body": "What about `assumeTrue(\"Test only makes sense if json parser doesn't have strict duplicate checks enabled and can be removed if we force it to be enabled\", JsonXContent.isStrictDuplicateDetectionEnabled());`?", "created_at": "2016-12-13T14:44:08Z" }, { "body": "`assumeFalse` here too?", "created_at": "2016-12-13T14:44:26Z" }, { "body": "It might be worth writing a blog post around how to migrate documents with these duplicate keys. Because they'll blow up if you try to highlight a field or do source filtering. But they *shouldn't* blow up if you just return the `_source` without filtering.", "created_at": "2016-12-13T14:47:11Z" }, { "body": "Cool.", "created_at": "2016-12-13T14:48:14Z" }, { "body": "Now these make more sense.", "created_at": "2016-12-13T14:48:41Z" }, { "body": "Yes it is. A user could still pass `-Des.json.strict_duplicate_detection=complete_bogus` and provoke an exception when we try to convert the system property to a `boolean`. I don't want to be lenient and terminate early then.", "created_at": "2016-12-13T15:10:09Z" }, { "body": "Oh, I didn't know about `assumeTrue` / `assumeFalse` and have only noticed the approach that I've used in the code base. Btw, I think this should be `assumeFalse`. But this makes sense and I'll change the tests as you've suggested. Thanks for the pointer.", "created_at": "2016-12-13T15:11:50Z" }, { "body": "Yes, I'll change all affected tests accordingly.", "created_at": "2016-12-13T15:13:22Z" }, { "body": "If they specify a non-boolean I think they'll get `false`.", "created_at": "2016-12-13T15:22:39Z" }, { "body": "I agree that we should only support `true` and `false` though.", "created_at": "2016-12-13T15:23:04Z" }, { "body": "Cool. I think I did have it backwards.", "created_at": "2016-12-13T15:23:38Z" }, { "body": "I'll add it to my backlog and look into it.", "created_at": "2016-12-13T15:26:46Z" } ], "title": "Enable strict duplicate checks for JSON content" }
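The last review exchange above is about how the escape-hatch system property should be parsed: only the literal strings "true" and "false" are accepted, and anything else fails fast instead of being silently coerced to false. A hedged illustration of the difference (`Booleans` is `org.elasticsearch.common.Booleans`, as used in the diff below; exact exception wording may vary):

```java
import org.elasticsearch.common.Booleans;

public final class EscapeHatchParsingSketch {

    public static void main(String[] args) {
        // JDK parsing is lenient: anything that is not "true" quietly becomes false.
        System.out.println(Boolean.parseBoolean("complete_bogus")); // false

        // The PR uses strict parsing instead, so a bogus value surfaces as an error
        // rather than silently flipping the duplicate checks off.
        System.out.println(Booleans.parseBooleanExact(
                System.getProperty("es.json.strict_duplicate_detection", "true")));

        // Booleans.parseBooleanExact("complete_bogus") would throw rather than return false.
    }
}
```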
{ "commits": [ { "message": "Enable strict duplicate checks for JSON content\n\nWith this commit we enable the Jackson feature 'STRICT_DUPLICATE_DETECTION'\nby default. This ensures that JSON keys are always unique. While this has\na performance impact, benchmarking has indicated that the typical drop in\nindexing throughput is around 1 - 2%.\n\nAs a last resort, we allow users to still disable strict duplicate checks\nby setting `-Des.json.strict_duplicate_detection=false` which is\nintentionally undocumented.\n\nCloses #19614" }, { "message": "Run custom duplicate check tests iff strict duplicate checks are disabled" }, { "message": "Correct grammar in migration docs" }, { "message": "Merge remote-tracking branch 'origin/master' into json_strict_duplicate_checks" }, { "message": "Merge remote-tracking branch 'origin/master' into json_strict_duplicate_checks" }, { "message": "Test strict duplicate check" }, { "message": "Use assume* instead of custom skipping logic" }, { "message": "Merge remote-tracking branch 'origin/master' into json_strict_duplicate_checks" }, { "message": "Strict parsing of boolean system property in JsonXContent" }, { "message": "Merge remote-tracking branch 'origin/master' into json_strict_duplicate_checks" }, { "message": "Add missing import" } ], "files": [ { "diff": "@@ -23,6 +23,7 @@\n import com.fasterxml.jackson.core.JsonFactory;\n import com.fasterxml.jackson.core.JsonGenerator;\n import com.fasterxml.jackson.core.JsonParser;\n+import org.elasticsearch.common.Booleans;\n import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.io.FastStringReader;\n import org.elasticsearch.common.xcontent.XContent;\n@@ -45,17 +46,39 @@ public class JsonXContent implements XContent {\n public static XContentBuilder contentBuilder() throws IOException {\n return XContentBuilder.builder(jsonXContent);\n }\n-\n private static final JsonFactory jsonFactory;\n+\n public static final JsonXContent jsonXContent;\n \n+ /*\n+ * NOTE: This comment is only meant for maintainers of the Elasticsearch code base and is intentionally not a Javadoc comment as it\n+ * describes an undocumented system property.\n+ *\n+ *\n+ * Determines whether the JSON parser will always check for duplicate keys in JSON content. This behavior is enabled by default but\n+ * can be disabled by setting the otherwise undocumented system property \"es.json.strict_duplicate_detection\" to \"false\".\n+ *\n+ * Before we've enabled this mode, we had custom duplicate checks in various parts of the code base. 
As the user can still disable this\n+ * mode and fall back to the legacy duplicate checks, we still need to keep the custom duplicate checks around and we also need to keep\n+ * the tests around.\n+ *\n+ * If this fallback via system property is removed one day in the future you can remove all tests that call this method and also remove\n+ * the corresponding custom duplicate check code.\n+ *\n+ */\n+ public static boolean isStrictDuplicateDetectionEnabled() {\n+ // Don't allow duplicate keys in JSON content by default but let the user opt out\n+ return Booleans.parseBooleanExact(System.getProperty(\"es.json.strict_duplicate_detection\", \"true\"));\n+ }\n+\n static {\n jsonFactory = new JsonFactory();\n jsonFactory.configure(JsonGenerator.Feature.QUOTE_FIELD_NAMES, true);\n jsonFactory.configure(JsonParser.Feature.ALLOW_COMMENTS, true);\n jsonFactory.configure(JsonFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now...\n // Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.core.json.UTF8JsonGenerator#close() method\n jsonFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false);\n+ jsonFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, isStrictDuplicateDetectionEnabled());\n jsonXContent = new JsonXContent();\n }\n ", "filename": "core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java", "status": "modified" }, { "diff": "@@ -67,7 +67,7 @@ public void testFieldsParsing() throws Exception {\n assertThat(request.getIndexConstraints()[3].getComparison(), equalTo(LTE));\n assertThat(request.getIndexConstraints()[4].getField(), equalTo(\"field5\"));\n assertThat(request.getIndexConstraints()[4].getValue(), equalTo(\"2\"));\n- assertThat(request.getIndexConstraints()[4].getProperty(), equalTo(MAX));\n+ assertThat(request.getIndexConstraints()[4].getProperty(), equalTo(MIN));\n assertThat(request.getIndexConstraints()[4].getComparison(), equalTo(GT));\n assertThat(request.getIndexConstraints()[5].getField(), equalTo(\"field5\"));\n assertThat(request.getIndexConstraints()[5].getValue(), equalTo(\"9\"));", "filename": "core/src/test/java/org/elasticsearch/action/fieldstats/FieldStatsRequestTests.java", "status": "modified" }, { "diff": "@@ -22,6 +22,7 @@\n import org.elasticsearch.ElasticsearchParseException;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.settings.SettingsException;\n+import org.elasticsearch.common.xcontent.json.JsonXContent;\n import org.elasticsearch.test.ESTestCase;\n \n import static org.hamcrest.CoreMatchers.containsString;\n@@ -48,6 +49,8 @@ public void testSimpleJsonSettings() throws Exception {\n }\n \n public void testDuplicateKeysThrowsException() {\n+ assumeFalse(\"Test only makes sense if JSON parser doesn't have strict duplicate checks enabled\",\n+ JsonXContent.isStrictDuplicateDetectionEnabled());\n final String json = \"{\\\"foo\\\":\\\"bar\\\",\\\"foo\\\":\\\"baz\\\"}\";\n final SettingsException e = expectThrows(SettingsException.class, () -> Settings.builder().loadFromSource(json).build());\n assertEquals(e.getCause().getClass(), ElasticsearchParseException.class);", "filename": "core/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java", "status": "modified" }, { "diff": "@@ -98,7 +98,7 @@ public void testRandomOrder() throws Exception {\n }\n \n public void testMissingAllConstructorArgs() throws IOException {\n- XContentParser parser = createParser(JsonXContent.jsonXContent, \n+ 
XContentParser parser = createParser(JsonXContent.jsonXContent,\n \"{\\n\"\n + \" \\\"mineral\\\": 1\\n\"\n + \"}\");\n@@ -113,7 +113,7 @@ public void testMissingAllConstructorArgs() throws IOException {\n }\n \n public void testMissingAllConstructorArgsButNotRequired() throws IOException {\n- XContentParser parser = createParser(JsonXContent.jsonXContent, \n+ XContentParser parser = createParser(JsonXContent.jsonXContent,\n \"{\\n\"\n + \" \\\"mineral\\\": 1\\n\"\n + \"}\");\n@@ -122,7 +122,7 @@ public void testMissingAllConstructorArgsButNotRequired() throws IOException {\n }\n \n public void testMissingSecondConstructorArg() throws IOException {\n- XContentParser parser = createParser(JsonXContent.jsonXContent, \n+ XContentParser parser = createParser(JsonXContent.jsonXContent,\n \"{\\n\"\n + \" \\\"mineral\\\": 1,\\n\"\n + \" \\\"animal\\\": \\\"cat\\\"\\n\"\n@@ -133,7 +133,7 @@ public void testMissingSecondConstructorArg() throws IOException {\n }\n \n public void testMissingSecondConstructorArgButNotRequired() throws IOException {\n- XContentParser parser = createParser(JsonXContent.jsonXContent, \n+ XContentParser parser = createParser(JsonXContent.jsonXContent,\n \"{\\n\"\n + \" \\\"mineral\\\": 1,\\n\"\n + \" \\\"animal\\\": \\\"cat\\\"\\n\"\n@@ -146,7 +146,7 @@ public void testMissingSecondConstructorArgButNotRequired() throws IOException {\n }\n \n public void testMissingFirstConstructorArg() throws IOException {\n- XContentParser parser = createParser(JsonXContent.jsonXContent, \n+ XContentParser parser = createParser(JsonXContent.jsonXContent,\n \"{\\n\"\n + \" \\\"mineral\\\": 1,\\n\"\n + \" \\\"vegetable\\\": 2\\n\"\n@@ -158,7 +158,7 @@ public void testMissingFirstConstructorArg() throws IOException {\n }\n \n public void testMissingFirstConstructorArgButNotRequired() throws IOException {\n- XContentParser parser = createParser(JsonXContent.jsonXContent, \n+ XContentParser parser = createParser(JsonXContent.jsonXContent,\n \"{\\n\"\n + \" \\\"mineral\\\": 1,\\n\"\n + \" \\\"vegetable\\\": 2\\n\"\n@@ -169,7 +169,9 @@ public void testMissingFirstConstructorArgButNotRequired() throws IOException {\n }\n \n public void testRepeatedConstructorParam() throws IOException {\n- XContentParser parser = createParser(JsonXContent.jsonXContent, \n+ assumeFalse(\"Test only makes sense if JSON parser doesn't have strict duplicate checks enabled\",\n+ JsonXContent.isStrictDuplicateDetectionEnabled());\n+ XContentParser parser = createParser(JsonXContent.jsonXContent,\n \"{\\n\"\n + \" \\\"vegetable\\\": 1,\\n\"\n + \" \\\"vegetable\\\": 2\\n\"\n@@ -182,7 +184,7 @@ public void testRepeatedConstructorParam() throws IOException {\n }\n \n public void testBadParam() throws IOException {\n- XContentParser parser = createParser(JsonXContent.jsonXContent, \n+ XContentParser parser = createParser(JsonXContent.jsonXContent,\n \"{\\n\"\n + \" \\\"animal\\\": \\\"cat\\\",\\n\"\n + \" \\\"vegetable\\\": 2,\\n\"\n@@ -196,7 +198,7 @@ public void testBadParam() throws IOException {\n }\n \n public void testBadParamBeforeObjectBuilt() throws IOException {\n- XContentParser parser = createParser(JsonXContent.jsonXContent, \n+ XContentParser parser = createParser(JsonXContent.jsonXContent,\n \"{\\n\"\n + \" \\\"a\\\": \\\"supercalifragilisticexpialidocious\\\",\\n\"\n + \" \\\"animal\\\": \\\"cat\\\"\\n,\"\n@@ -256,7 +258,7 @@ void setFoo(String foo) {\n parser.declareString(ctorArgOptional ? 
optionalConstructorArg() : constructorArg(), new ParseField(\"yeah\"));\n \n // ctor arg first so we can test for the bug we found one time\n- XContentParser xcontent = createParser(JsonXContent.jsonXContent, \n+ XContentParser xcontent = createParser(JsonXContent.jsonXContent,\n \"{\\n\"\n + \" \\\"yeah\\\": \\\"!\\\",\\n\"\n + \" \\\"foo\\\": \\\"foo\\\"\\n\"\n@@ -265,7 +267,7 @@ void setFoo(String foo) {\n assertTrue(result.fooSet);\n \n // and ctor arg second just in case\n- xcontent = createParser(JsonXContent.jsonXContent, \n+ xcontent = createParser(JsonXContent.jsonXContent,\n \"{\\n\"\n + \" \\\"foo\\\": \\\"foo\\\",\\n\"\n + \" \\\"yeah\\\": \\\"!\\\"\\n\"\n@@ -275,7 +277,7 @@ void setFoo(String foo) {\n \n if (ctorArgOptional) {\n // and without the constructor arg if we've made it optional\n- xcontent = createParser(JsonXContent.jsonXContent, \n+ xcontent = createParser(JsonXContent.jsonXContent,\n \"{\\n\"\n + \" \\\"foo\\\": \\\"foo\\\"\\n\"\n + \"}\");\n@@ -285,7 +287,7 @@ void setFoo(String foo) {\n }\n \n public void testIgnoreUnknownFields() throws IOException {\n- XContentParser parser = createParser(JsonXContent.jsonXContent, \n+ XContentParser parser = createParser(JsonXContent.jsonXContent,\n \"{\\n\"\n + \" \\\"test\\\" : \\\"foo\\\",\\n\"\n + \" \\\"junk\\\" : 2\\n\"", "filename": "core/src/test/java/org/elasticsearch/common/xcontent/ConstructingObjectParserTests.java", "status": "modified" }, { "diff": "@@ -22,6 +22,7 @@\n import com.fasterxml.jackson.core.JsonFactory;\n import com.fasterxml.jackson.core.JsonGenerator;\n \n+import com.fasterxml.jackson.core.JsonParseException;\n import org.elasticsearch.common.xcontent.BaseXContentTestCase;\n import org.elasticsearch.common.xcontent.XContentType;\n \n@@ -39,4 +40,13 @@ public void testBigInteger() throws Exception {\n JsonGenerator generator = new JsonFactory().createGenerator(os);\n doTestBigInteger(generator, os);\n }\n+\n+ public void testChecksForDuplicates() throws Exception {\n+ assumeTrue(\"Test only makes sense if JSON parser doesn't have strict duplicate checks enabled\",\n+ JsonXContent.isStrictDuplicateDetectionEnabled());\n+\n+ JsonParseException pex = expectThrows(JsonParseException.class,\n+ () -> XContentType.JSON.xContent().createParser(\"{ \\\"key\\\": 1, \\\"key\\\": 2 }\").map());\n+ assertEquals(\"Duplicate field 'key'\", pex.getMessage());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java", "status": "modified" }, { "diff": "@@ -83,8 +83,8 @@ public void testCopyToFieldsParsing() throws Exception {\n assertThat(copyTestMap.get(\"type\").toString(), is(\"text\"));\n List<String> copyToList = (List<String>) copyTestMap.get(\"copy_to\");\n assertThat(copyToList.size(), equalTo(2));\n- assertThat(copyToList.get(0).toString(), equalTo(\"another_field\"));\n- assertThat(copyToList.get(1).toString(), equalTo(\"cyclic_test\"));\n+ assertThat(copyToList.get(0), equalTo(\"another_field\"));\n+ assertThat(copyToList.get(1), equalTo(\"cyclic_test\"));\n \n // Check data parsing\n BytesReference json = jsonBuilder().startObject()\n@@ -312,44 +312,43 @@ public void testCopyToFieldMerge() throws Exception {\n public void testCopyToNestedField() throws Exception {\n IndexService indexService = createIndex(\"test\");\n DocumentMapperParser parser = indexService.mapperService().documentMapperParser();\n- for (boolean mapped : new boolean[] {true, false}) {\n- XContentBuilder mapping = jsonBuilder().startObject()\n- .startObject(\"type\")\n- 
.startObject(\"properties\")\n- .startObject(\"target\")\n- .field(\"type\", \"long\")\n- .field(\"doc_values\", false)\n- .endObject()\n- .startObject(\"n1\")\n- .field(\"type\", \"nested\")\n- .startObject(\"properties\")\n- .startObject(\"target\")\n- .field(\"type\", \"long\")\n- .field(\"doc_values\", false)\n+ XContentBuilder mapping = jsonBuilder().startObject()\n+ .startObject(\"type\")\n+ .startObject(\"properties\")\n+ .startObject(\"target\")\n+ .field(\"type\", \"long\")\n+ .field(\"doc_values\", false)\n+ .endObject()\n+ .startObject(\"n1\")\n+ .field(\"type\", \"nested\")\n+ .startObject(\"properties\")\n+ .startObject(\"target\")\n+ .field(\"type\", \"long\")\n+ .field(\"doc_values\", false)\n+ .endObject()\n+ .startObject(\"n2\")\n+ .field(\"type\", \"nested\")\n+ .startObject(\"properties\")\n+ .startObject(\"target\")\n+ .field(\"type\", \"long\")\n+ .field(\"doc_values\", false)\n+ .endObject()\n+ .startObject(\"source\")\n+ .field(\"type\", \"long\")\n+ .field(\"doc_values\", false)\n+ .startArray(\"copy_to\")\n+ .value(\"target\") // should go to the root doc\n+ .value(\"n1.target\") // should go to the parent doc\n+ .value(\"n1.n2.target\") // should go to the current doc\n+ .endArray()\n+ .endObject()\n .endObject()\n- .startObject(\"n2\")\n- .field(\"type\", \"nested\")\n- .startObject(\"properties\")\n- .startObject(\"target\")\n- .field(\"type\", \"long\")\n- .field(\"doc_values\", false)\n- .endObject()\n- .startObject(\"source\")\n- .field(\"type\", \"long\")\n- .field(\"doc_values\", false)\n- .startArray(\"copy_to\")\n- .value(\"target\") // should go to the root doc\n- .value(\"n1.target\") // should go to the parent doc\n- .value(\"n1.n2.target\") // should go to the current doc\n- .endArray()\n- .endObject();\n- for (int i = 0; i < 3; ++i) {\n- if (mapped) {\n- mapping = mapping.startObject(\"target\").field(\"type\", \"long\").field(\"doc_values\", false).endObject();\n- }\n- mapping = mapping.endObject().endObject();\n- }\n- mapping = mapping.endObject();\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .endObject();\n \n DocumentMapper mapper = parser.parse(\"type\", new CompressedXContent(mapping.string()));\n \n@@ -376,39 +375,38 @@ public void testCopyToNestedField() throws Exception {\n .endArray()\n .endObject();\n \n- ParsedDocument doc = mapper.parse(\"test\", \"type\", \"1\", jsonDoc.bytes());\n- assertEquals(6, doc.docs().size());\n-\n- Document nested = doc.docs().get(0);\n- assertFieldValue(nested, \"n1.n2.target\", 7L);\n- assertFieldValue(nested, \"n1.target\");\n- assertFieldValue(nested, \"target\");\n-\n- nested = doc.docs().get(2);\n- assertFieldValue(nested, \"n1.n2.target\", 5L);\n- assertFieldValue(nested, \"n1.target\");\n- assertFieldValue(nested, \"target\");\n-\n- nested = doc.docs().get(3);\n- assertFieldValue(nested, \"n1.n2.target\", 3L);\n- assertFieldValue(nested, \"n1.target\");\n- assertFieldValue(nested, \"target\");\n-\n- Document parent = doc.docs().get(1);\n- assertFieldValue(parent, \"target\");\n- assertFieldValue(parent, \"n1.target\", 7L);\n- assertFieldValue(parent, \"n1.n2.target\");\n-\n- parent = doc.docs().get(4);\n- assertFieldValue(parent, \"target\");\n- assertFieldValue(parent, \"n1.target\", 3L, 5L);\n- assertFieldValue(parent, \"n1.n2.target\");\n-\n- Document root = doc.docs().get(5);\n- assertFieldValue(root, \"target\", 3L, 5L, 7L);\n- assertFieldValue(root, \"n1.target\");\n- assertFieldValue(root, \"n1.n2.target\");\n- }\n+ ParsedDocument doc = mapper.parse(\"test\", 
\"type\", \"1\", jsonDoc.bytes());\n+ assertEquals(6, doc.docs().size());\n+\n+ Document nested = doc.docs().get(0);\n+ assertFieldValue(nested, \"n1.n2.target\", 7L);\n+ assertFieldValue(nested, \"n1.target\");\n+ assertFieldValue(nested, \"target\");\n+\n+ nested = doc.docs().get(2);\n+ assertFieldValue(nested, \"n1.n2.target\", 5L);\n+ assertFieldValue(nested, \"n1.target\");\n+ assertFieldValue(nested, \"target\");\n+\n+ nested = doc.docs().get(3);\n+ assertFieldValue(nested, \"n1.n2.target\", 3L);\n+ assertFieldValue(nested, \"n1.target\");\n+ assertFieldValue(nested, \"target\");\n+\n+ Document parent = doc.docs().get(1);\n+ assertFieldValue(parent, \"target\");\n+ assertFieldValue(parent, \"n1.target\", 7L);\n+ assertFieldValue(parent, \"n1.n2.target\");\n+\n+ parent = doc.docs().get(4);\n+ assertFieldValue(parent, \"target\");\n+ assertFieldValue(parent, \"n1.target\", 3L, 5L);\n+ assertFieldValue(parent, \"n1.n2.target\");\n+\n+ Document root = doc.docs().get(5);\n+ assertFieldValue(root, \"target\", 3L, 5L, 7L);\n+ assertFieldValue(root, \"n1.target\");\n+ assertFieldValue(root, \"n1.n2.target\");\n }\n \n public void testCopyToDynamicNestedObjectParsing() throws Exception {", "filename": "core/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java", "status": "modified" }, { "diff": "@@ -29,6 +29,7 @@\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.common.xcontent.XContentType;\n+import org.elasticsearch.common.xcontent.json.JsonXContent;\n import org.elasticsearch.search.internal.SearchContext;\n import org.elasticsearch.test.AbstractQueryTestCase;\n import org.hamcrest.Matchers;\n@@ -339,6 +340,8 @@ public void testUnknownQueryName() throws IOException {\n * test that two queries in object throws error\n */\n public void testTooManyQueriesInObject() throws IOException {\n+ assumeFalse(\"Test only makes sense if JSON parser doesn't have strict duplicate checks enabled\",\n+ JsonXContent.isStrictDuplicateDetectionEnabled());\n String clauseType = randomFrom(\"must\", \"should\", \"must_not\", \"filter\");\n // should also throw error if invalid query is preceded by a valid one\n String query = \"{\\n\" +", "filename": "core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -22,6 +22,7 @@\n import org.apache.lucene.search.ConstantScoreQuery;\n import org.apache.lucene.search.Query;\n import org.elasticsearch.common.ParsingException;\n+import org.elasticsearch.common.xcontent.json.JsonXContent;\n import org.elasticsearch.search.internal.SearchContext;\n import org.elasticsearch.test.AbstractQueryTestCase;\n \n@@ -65,6 +66,8 @@ public void testFilterElement() throws IOException {\n * test that multiple \"filter\" elements causes {@link ParsingException}\n */\n public void testMultipleFilterElements() throws IOException {\n+ assumeFalse(\"Test only makes sense if JSON parser doesn't have strict duplicate checks enabled\",\n+ JsonXContent.isStrictDuplicateDetectionEnabled());\n String queryString = \"{ \\\"\" + ConstantScoreQueryBuilder.NAME + \"\\\" : {\\n\" +\n \"\\\"filter\\\" : { \\\"term\\\": { \\\"foo\\\": \\\"a\\\" } },\\n\" +\n \"\\\"filter\\\" : { \\\"term\\\": { \\\"foo\\\": \\\"x\\\" } },\\n\" +", "filename": "core/src/test/java/org/elasticsearch/index/query/ConstantScoreQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -38,6 +38,7 @@\n import org.elasticsearch.common.unit.DistanceUnit;\n 
import org.elasticsearch.common.xcontent.XContentParser;\n import org.elasticsearch.common.xcontent.XContentType;\n+import org.elasticsearch.common.xcontent.json.JsonXContent;\n import org.elasticsearch.index.query.AbstractQueryBuilder;\n import org.elasticsearch.index.query.MatchAllQueryBuilder;\n import org.elasticsearch.index.query.QueryBuilder;\n@@ -730,6 +731,8 @@ public void testMalformedQueryMultipleQueryObjects() throws IOException {\n }\n \n public void testMalformedQueryMultipleQueryElements() throws IOException {\n+ assumeFalse(\"Test only makes sense if JSON parser doesn't have strict duplicate checks enabled\",\n+ JsonXContent.isStrictDuplicateDetectionEnabled());\n String json = \"{\\n\" +\n \" \\\"function_score\\\":{\\n\" +\n \" \\\"query\\\":{\\n\" +", "filename": "core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -21,23 +21,18 @@\n \n import org.elasticsearch.common.ParseFieldMatcher;\n import org.elasticsearch.common.ParsingException;\n-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentParser;\n import org.elasticsearch.common.xcontent.json.JsonXContent;\n import org.elasticsearch.env.Environment;\n+import org.elasticsearch.test.AbstractQueryTestCase;\n import org.elasticsearch.index.query.QueryParseContext;\n-import org.elasticsearch.indices.IndicesModule;\n import org.elasticsearch.indices.query.IndicesQueriesRegistry;\n import org.elasticsearch.script.ScriptService;\n import org.elasticsearch.search.SearchModule;\n-import org.elasticsearch.test.AbstractQueryTestCase;\n import org.elasticsearch.test.ESTestCase;\n \n-import java.util.ArrayList;\n-import java.util.Collections;\n-import java.util.List;\n import java.util.Random;\n import java.util.regex.Matcher;\n import java.util.regex.Pattern;\n@@ -68,11 +63,7 @@ public void setUp() throws Exception {\n Settings settings = Settings.builder().put(\"node.name\", AbstractQueryTestCase.class.toString())\n .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())\n .put(ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING.getKey(), false).build();\n- IndicesModule indicesModule = new IndicesModule(Collections.emptyList()) ;\n SearchModule searchModule = new SearchModule(settings, false, emptyList());\n- List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();\n- entries.addAll(indicesModule.getNamedWriteables());\n- entries.addAll(searchModule.getNamedWriteables());\n aggParsers = searchModule.getSearchRequestParsers().aggParsers;\n // create some random type with some default field, those types will\n // stick around for all of the subclasses\n@@ -113,6 +104,8 @@ public void testTwoTypes() throws Exception {\n }\n \n public void testTwoAggs() throws Exception {\n+ assumeFalse(\"Test only makes sense if JSON parser doesn't have strict duplicate checks enabled\",\n+ JsonXContent.isStrictDuplicateDetectionEnabled());\n XContentBuilder source = JsonXContent.contentBuilder()\n .startObject()\n .startObject(\"by_date\")\n@@ -187,6 +180,8 @@ public void testInvalidAggregationName() throws Exception {\n }\n \n public void testSameAggregationName() throws Exception {\n+ assumeFalse(\"Test only makes sense if JSON parser doesn't have strict duplicate checks enabled\",\n+ JsonXContent.isStrictDuplicateDetectionEnabled());\n final String name = 
randomAsciiOfLengthBetween(1, 10);\n XContentBuilder source = JsonXContent.contentBuilder()\n .startObject()", "filename": "core/src/test/java/org/elasticsearch/search/aggregations/AggregatorParsingTests.java", "status": "modified" }, { "diff": "@@ -506,120 +506,150 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception {\n \n public void testSortNestedWithNestedFilter() throws Exception {\n assertAcked(prepareCreate(\"test\")\n- .addMapping(\"type1\", XContentFactory.jsonBuilder().startObject()\n+ .addMapping(\"type1\", XContentFactory.jsonBuilder()\n+ .startObject()\n .startObject(\"type1\")\n- .startObject(\"properties\")\n- .startObject(\"grand_parent_values\").field(\"type\", \"long\").endObject()\n- .startObject(\"parent\").field(\"type\", \"nested\")\n- .startObject(\"properties\")\n- .startObject(\"parent_values\").field(\"type\", \"long\").endObject()\n- .startObject(\"child\").field(\"type\", \"nested\")\n- .startObject(\"properties\")\n- .startObject(\"child_values\").field(\"type\", \"long\").endObject()\n- .endObject()\n- .endObject()\n- .endObject()\n- .endObject()\n- .endObject()\n+ .startObject(\"properties\")\n+ .startObject(\"grand_parent_values\")\n+ .field(\"type\", \"long\")\n+ .endObject()\n+ .startObject(\"parent\")\n+ .field(\"type\", \"nested\")\n+ .startObject(\"properties\")\n+ .startObject(\"parent_values\")\n+ .field(\"type\", \"long\")\n+ .endObject()\n+ .startObject(\"child\")\n+ .field(\"type\", \"nested\")\n+ .startObject(\"properties\")\n+ .startObject(\"child_values\")\n+ .field(\"type\", \"long\")\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .endObject()\n .endObject()\n- .endObject()));\n+ .endObject()));\n ensureGreen();\n \n // sum: 11\n- client().prepareIndex(\"test\", \"type1\", Integer.toString(1)).setSource(jsonBuilder().startObject()\n+ client().prepareIndex(\"test\", \"type1\", Integer.toString(1)).setSource(jsonBuilder()\n+ .startObject()\n .field(\"grand_parent_values\", 1L)\n- .startObject(\"parent\")\n- .field(\"filter\", false)\n- .field(\"parent_values\", 1L)\n- .startObject(\"child\")\n- .field(\"filter\", true)\n- .field(\"child_values\", 1L)\n- .startObject(\"child_obj\")\n- .field(\"value\", 1L)\n- .endObject()\n- .endObject()\n- .startObject(\"child\")\n- .field(\"filter\", false)\n- .field(\"child_values\", 6L)\n- .endObject()\n- .endObject()\n- .startObject(\"parent\")\n- .field(\"filter\", true)\n- .field(\"parent_values\", 2L)\n- .startObject(\"child\")\n- .field(\"filter\", false)\n- .field(\"child_values\", -1L)\n- .endObject()\n- .startObject(\"child\")\n- .field(\"filter\", false)\n- .field(\"child_values\", 5L)\n- .endObject()\n- .endObject()\n- .endObject()).execute().actionGet();\n+ .startArray(\"parent\")\n+ .startObject()\n+ .field(\"filter\", false)\n+ .field(\"parent_values\", 1L)\n+ .startArray(\"child\")\n+ .startObject()\n+ .field(\"filter\", true)\n+ .field(\"child_values\", 1L)\n+ .startObject(\"child_obj\")\n+ .field(\"value\", 1L)\n+ .endObject()\n+ .endObject()\n+ .startObject()\n+ .field(\"filter\", false)\n+ .field(\"child_values\", 6L)\n+ .endObject()\n+ .endArray()\n+ .endObject()\n+ .startObject()\n+ .field(\"filter\", true)\n+ .field(\"parent_values\", 2L)\n+ .startArray(\"child\")\n+ .startObject()\n+ .field(\"filter\", false)\n+ .field(\"child_values\", -1L)\n+ .endObject()\n+ .startObject()\n+ .field(\"filter\", false)\n+ .field(\"child_values\", 5L)\n+ .endObject()\n+ .endArray()\n+ .endObject()\n+ .endArray()\n+ 
.endObject()).execute().actionGet();\n \n // sum: 7\n- client().prepareIndex(\"test\", \"type1\", Integer.toString(2)).setSource(jsonBuilder().startObject()\n+ client().prepareIndex(\"test\", \"type1\", Integer.toString(2)).setSource(jsonBuilder()\n+ .startObject()\n .field(\"grand_parent_values\", 2L)\n- .startObject(\"parent\")\n- .field(\"filter\", false)\n- .field(\"parent_values\", 2L)\n- .startObject(\"child\")\n- .field(\"filter\", true)\n- .field(\"child_values\", 2L)\n- .startObject(\"child_obj\")\n- .field(\"value\", 2L)\n- .endObject()\n- .endObject()\n- .startObject(\"child\")\n- .field(\"filter\", false)\n- .field(\"child_values\", 4L)\n- .endObject()\n- .endObject()\n- .startObject(\"parent\")\n- .field(\"parent_values\", 3L)\n- .field(\"filter\", true)\n- .startObject(\"child\")\n- .field(\"child_values\", -2L)\n- .field(\"filter\", false)\n- .endObject()\n- .startObject(\"child\")\n- .field(\"filter\", false)\n- .field(\"child_values\", 3L)\n- .endObject()\n- .endObject()\n+ .startArray(\"parent\")\n+ .startObject()\n+ .field(\"filter\", false)\n+ .field(\"parent_values\", 2L)\n+ .startArray(\"child\")\n+ .startObject()\n+ .field(\"filter\", true)\n+ .field(\"child_values\", 2L)\n+ .startObject(\"child_obj\")\n+ .field(\"value\", 2L)\n+ .endObject()\n+ .endObject()\n+ .startObject()\n+ .field(\"filter\", false)\n+ .field(\"child_values\", 4L)\n+ .endObject()\n+ .endArray()\n+ .endObject()\n+ .startObject()\n+ .field(\"parent_values\", 3L)\n+ .field(\"filter\", true)\n+ .startArray(\"child\")\n+ .startObject()\n+ .field(\"child_values\", -2L)\n+ .field(\"filter\", false)\n+ .endObject()\n+ .startObject()\n+ .field(\"filter\", false)\n+ .field(\"child_values\", 3L)\n+ .endObject()\n+ .endArray()\n+ .endObject()\n+ .endArray()\n .endObject()).execute().actionGet();\n \n // sum: 2\n- client().prepareIndex(\"test\", \"type1\", Integer.toString(3)).setSource(jsonBuilder().startObject()\n+ client().prepareIndex(\"test\", \"type1\", Integer.toString(3)).setSource(jsonBuilder()\n+ .startObject()\n .field(\"grand_parent_values\", 3L)\n- .startObject(\"parent\")\n- .field(\"parent_values\", 3L)\n- .field(\"filter\", false)\n- .startObject(\"child\")\n- .field(\"filter\", true)\n- .field(\"child_values\", 3L)\n- .startObject(\"child_obj\")\n- .field(\"value\", 3L)\n- .endObject()\n- .endObject()\n- .startObject(\"child\")\n- .field(\"filter\", false)\n- .field(\"child_values\", 1L)\n- .endObject()\n- .endObject()\n- .startObject(\"parent\")\n- .field(\"parent_values\", 4L)\n- .field(\"filter\", true)\n- .startObject(\"child\")\n- .field(\"filter\", false)\n- .field(\"child_values\", -3L)\n- .endObject()\n- .startObject(\"child\")\n- .field(\"filter\", false)\n- .field(\"child_values\", 1L)\n- .endObject()\n- .endObject()\n- .endObject()).execute().actionGet();\n+ .startArray(\"parent\")\n+ .startObject()\n+ .field(\"parent_values\", 3L)\n+ .field(\"filter\", false)\n+ .startArray(\"child\")\n+ .startObject()\n+ .field(\"filter\", true)\n+ .field(\"child_values\", 3L)\n+ .startObject(\"child_obj\")\n+ .field(\"value\", 3L)\n+ .endObject()\n+ .endObject()\n+ .startObject()\n+ .field(\"filter\", false)\n+ .field(\"child_values\", 1L)\n+ .endObject()\n+ .endArray()\n+ .endObject()\n+ .startObject()\n+ .field(\"parent_values\", 4L)\n+ .field(\"filter\", true)\n+ .startArray(\"child\")\n+ .startObject()\n+ .field(\"filter\", false)\n+ .field(\"child_values\", -3L)\n+ .endObject()\n+ .startObject()\n+ .field(\"filter\", false)\n+ .field(\"child_values\", 1L)\n+ .endObject()\n+ .endArray()\n+ 
.endObject()\n+ .endArray()\n+ .endObject()).execute().actionGet();\n refresh();\n \n // Without nested filter", "filename": "core/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java", "status": "modified" }, { "diff": "@@ -149,9 +149,13 @@ public void testIllegalXContent() throws IOException {\n \"Required [field]\");\n \n // test two fieldnames\n- directGenerator = \"{ \\\"field\\\" : \\\"f1\\\", \\\"field\\\" : \\\"f2\\\" }\";\n- assertIllegalXContent(directGenerator, ParsingException.class,\n+ if (JsonXContent.isStrictDuplicateDetectionEnabled()) {\n+ logger.info(\"Skipping test as it uses a custom duplicate check that is obsolete when strict duplicate checks are enabled.\");\n+ } else {\n+ directGenerator = \"{ \\\"field\\\" : \\\"f1\\\", \\\"field\\\" : \\\"f2\\\" }\";\n+ assertIllegalXContent(directGenerator, ParsingException.class,\n \"[direct_generator] failed to parse field [field]\");\n+ }\n \n // test unknown field\n directGenerator = \"{ \\\"unknown_param\\\" : \\\"f1\\\" }\";", "filename": "core/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java", "status": "modified" }, { "diff": "@@ -22,7 +22,7 @@\n }\n },\n \"field5\": {\n- \"max_value\" : {\n+ \"min_value\" : {\n \"gt\": 2\n },\n \"max_value\" : {", "filename": "core/src/test/resources/org/elasticsearch/action/fieldstats/fieldstats-index-constraints-request.json", "status": "modified" }, { "diff": "@@ -8,6 +8,11 @@ This feature was removed in the 5.x series, but a backwards-compatibility layer\n system property `elasticsearch.json.allow_unquoted_field_names`. This backwards-compatibility layer\n has been removed in Elasticsearch 6.0.0.\n \n+==== Duplicate Keys in JSON\n+\n+In previous versions of Elasticsearch, JSON documents were allowed to contain duplicate keys. Elasticsearch 6.0.0\n+ enforces that all keys are unique.\n+\n ==== Analyze API changes\n \n The deprecated request parameters and plain text in request body has been removed. 
Define parameters in request body.", "filename": "docs/reference/migration/migrate_6_0/rest.asciidoc", "status": "modified" }, { "diff": "@@ -196,10 +196,24 @@ public void testDateObjectMethods() throws Exception {\n public void testMultiValueMethods() throws Exception {\n ElasticsearchAssertions.assertAcked(prepareCreate(\"test\").addMapping(\"doc\", \"double0\", \"type=double\", \"double1\", \"type=double\", \"double2\", \"type=double\"));\n ensureGreen(\"test\");\n+\n+ Map<String, Object> doc1 = new HashMap<>();\n+ doc1.put(\"double0\", new Double[]{5.0d, 1.0d, 1.5d});\n+ doc1.put(\"double1\", new Double[]{1.2d, 2.4d});\n+ doc1.put(\"double2\", 3.0d);\n+\n+ Map<String, Object> doc2 = new HashMap<>();\n+ doc2.put(\"double0\", 5.0d);\n+ doc2.put(\"double1\", 3.0d);\n+\n+ Map<String, Object> doc3 = new HashMap<>();\n+ doc3.put(\"double0\", new Double[]{5.0d, 1.0d, 1.5d, -1.5d});\n+ doc3.put(\"double1\", 4.0d);\n+\n indexRandom(true,\n- client().prepareIndex(\"test\", \"doc\", \"1\").setSource(\"double0\", \"5.0\", \"double0\", \"1.0\", \"double0\", \"1.5\", \"double1\", \"1.2\", \"double1\", \"2.4\", \"double2\", \"3.0\"),\n- client().prepareIndex(\"test\", \"doc\", \"2\").setSource(\"double0\", \"5.0\", \"double1\", \"3.0\"),\n- client().prepareIndex(\"test\", \"doc\", \"3\").setSource(\"double0\", \"5.0\", \"double0\", \"1.0\", \"double0\", \"1.5\", \"double0\", \"-1.5\", \"double1\", \"4.0\"));\n+ client().prepareIndex(\"test\", \"doc\", \"1\").setSource(doc1),\n+ client().prepareIndex(\"test\", \"doc\", \"2\").setSource(doc2),\n+ client().prepareIndex(\"test\", \"doc\", \"3\").setSource(doc3));\n \n \n SearchResponse rsp = buildRequest(\"doc['double0'].count() + doc['double1'].count()\").get();", "filename": "modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java", "status": "modified" }, { "diff": "@@ -184,6 +184,9 @@ public static SearchRequest createSearchRequest(PercolateRequest percolateReques\n }\n } else if (token.isValue()) {\n if (\"size\".equals(currentFieldName)) {\n+ if (percolateRequest.onlyCount()) {\n+ throw new IllegalArgumentException(\"Cannot set size if onlyCount == true\");\n+ }\n searchSource.field(\"size\", parser.intValue());\n } else if (\"sort\".equals(currentFieldName)) {\n searchSource.field(\"sort\", parser.text());", "filename": "modules/percolator/src/main/java/org/elasticsearch/percolator/TransportPercolateAction.java", "status": "modified" }, { "diff": "@@ -94,24 +94,25 @@ public void testAggregations() throws Exception {\n PercolateRequestBuilder percolateRequestBuilder = preparePercolate(client())\n .setIndices(INDEX_NAME)\n .setDocumentType(\"type\")\n- .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field(\"field1\", value).endObject()))\n- .setSize(expectedCount[i % numUniqueQueries]);\n+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field(\"field1\", value).endObject()));\n \n SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values());\n percolateRequestBuilder.addAggregation(AggregationBuilders.terms(\"a\").field(\"field2\").collectMode(aggCollectionMode));\n \n if (randomBoolean()) {\n percolateRequestBuilder.setPercolateQuery(matchAllQuery());\n }\n- if (randomBoolean()) {\n- percolateRequestBuilder.setScore(true);\n- } else {\n- percolateRequestBuilder.setSortByScore(true).setSize(numQueries);\n- }\n \n boolean countOnly = randomBoolean();\n if (countOnly) {\n percolateRequestBuilder.setOnlyCount(countOnly);\n+ } else {\n+ // can only set 
size if we also keep track of matches (i.e. countOnly == false)\n+ if (randomBoolean()) {\n+ percolateRequestBuilder.setScore(true).setSize(expectedCount[i % numUniqueQueries]);\n+ } else {\n+ percolateRequestBuilder.setSortByScore(true).setSize(numQueries);\n+ }\n }\n \n PercolateResponse response = percolateRequestBuilder.execute().actionGet();\n@@ -161,24 +162,25 @@ public void testAggregationsAndPipelineAggregations() throws Exception {\n PercolateRequestBuilder percolateRequestBuilder = preparePercolate(client())\n .setIndices(INDEX_NAME)\n .setDocumentType(\"type\")\n- .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field(\"field1\", value).endObject()))\n- .setSize(expectedCount[i % numUniqueQueries]);\n+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field(\"field1\", value).endObject()));\n \n SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values());\n percolateRequestBuilder.addAggregation(AggregationBuilders.terms(\"a\").field(\"field2\").collectMode(aggCollectionMode));\n \n if (randomBoolean()) {\n percolateRequestBuilder.setPercolateQuery(matchAllQuery());\n }\n- if (randomBoolean()) {\n- percolateRequestBuilder.setScore(true);\n- } else {\n- percolateRequestBuilder.setSortByScore(true).setSize(numQueries);\n- }\n \n boolean countOnly = randomBoolean();\n if (countOnly) {\n percolateRequestBuilder.setOnlyCount(countOnly);\n+ } else {\n+ // can only set size if we also keep track of matches (i.e. countOnly == false)\n+ if (randomBoolean()) {\n+ percolateRequestBuilder.setScore(true).setSize(expectedCount[i % numUniqueQueries]);\n+ } else {\n+ percolateRequestBuilder.setSortByScore(true).setSize(numQueries);\n+ }\n }\n \n percolateRequestBuilder.addAggregation(PipelineAggregatorBuilders.maxBucket(\"max_a\", \"a>_count\"));\n@@ -243,8 +245,7 @@ public void testSingleShardAggregations() throws Exception {\n PercolateRequestBuilder percolateRequestBuilder = preparePercolate(client())\n .setIndices(INDEX_NAME)\n .setDocumentType(\"type\")\n- .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field(\"field1\", value).endObject()))\n- .setSize(numQueries);\n+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field(\"field1\", value).endObject()));\n \n SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values());\n percolateRequestBuilder.addAggregation(AggregationBuilders.terms(\"terms\").field(\"field2\").collectMode(aggCollectionMode)\n@@ -253,15 +254,17 @@ public void testSingleShardAggregations() throws Exception {\n if (randomBoolean()) {\n percolateRequestBuilder.setPercolateQuery(matchAllQuery());\n }\n- if (randomBoolean()) {\n- percolateRequestBuilder.setScore(true);\n- } else {\n- percolateRequestBuilder.setSortByScore(true).setSize(numQueries);\n- }\n \n boolean countOnly = randomBoolean();\n if (countOnly) {\n percolateRequestBuilder.setOnlyCount(countOnly);\n+ } else {\n+ // can only set size if we also keep track of matches (i.e. 
countOnly == false)\n+ if (randomBoolean()) {\n+ percolateRequestBuilder.setScore(true).setSize(numQueries);\n+ } else {\n+ percolateRequestBuilder.setSortByScore(true).setSize(numQueries);\n+ }\n }\n \n percolateRequestBuilder.addAggregation(PipelineAggregatorBuilders.maxBucket(\"max_terms\", \"terms>_count\"));", "filename": "modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorAggregationsIT.java", "status": "modified" }, { "diff": "@@ -1031,24 +1031,28 @@ public void testPercolateSizingWithQueryAndFilter() throws Exception {\n refresh();\n \n boolean onlyCount = randomBoolean();\n- PercolateResponse response = preparePercolate(client())\n- .setIndices(INDEX_NAME).setDocumentType(\"my-type\")\n- .setOnlyCount(onlyCount)\n- .setPercolateDoc(docBuilder().setDoc(\"field\", \"value\"))\n- .setSize((int) totalQueries)\n- .execute().actionGet();\n+ PercolateRequestBuilder builder = preparePercolate(client())\n+ .setIndices(INDEX_NAME).setDocumentType(\"my-type\")\n+ .setOnlyCount(onlyCount)\n+ .setPercolateDoc(docBuilder().setDoc(\"field\", \"value\"));\n+ if (!onlyCount) {\n+ builder.setSize((int) totalQueries);\n+ }\n+ PercolateResponse response = builder.execute().actionGet();\n assertMatchCount(response, totalQueries);\n if (!onlyCount) {\n assertThat(response.getMatches().length, equalTo((int) totalQueries));\n }\n \n int size = randomIntBetween(0, (int) totalQueries - 1);\n- response = preparePercolate(client())\n- .setIndices(INDEX_NAME).setDocumentType(\"my-type\")\n- .setOnlyCount(onlyCount)\n- .setPercolateDoc(docBuilder().setDoc(\"field\", \"value\"))\n- .setSize(size)\n- .execute().actionGet();\n+ builder = preparePercolate(client())\n+ .setIndices(INDEX_NAME).setDocumentType(\"my-type\")\n+ .setOnlyCount(onlyCount)\n+ .setPercolateDoc(docBuilder().setDoc(\"field\", \"value\"));\n+ if (!onlyCount) {\n+ builder.setSize(size);\n+ }\n+ response = builder.execute().actionGet();\n assertMatchCount(response, totalQueries);\n if (!onlyCount) {\n assertThat(response.getMatches().length, equalTo(size));\n@@ -1060,13 +1064,15 @@ public void testPercolateSizingWithQueryAndFilter() throws Exception {\n int runs = randomIntBetween(3, 16);\n for (int i = 0; i < runs; i++) {\n onlyCount = randomBoolean();\n- response = preparePercolate(client())\n+ builder = preparePercolate(client())\n .setIndices(INDEX_NAME).setDocumentType(\"my-type\")\n .setOnlyCount(onlyCount)\n .setPercolateDoc(docBuilder().setDoc(\"field\", \"value\"))\n- .setPercolateQuery(termQuery(\"level\", 1 + randomInt(numLevels - 1)))\n- .setSize((int) numQueriesPerLevel)\n- .execute().actionGet();\n+ .setPercolateQuery(termQuery(\"level\", 1 + randomInt(numLevels - 1)));\n+ if (!onlyCount) {\n+ builder.setSize((int) numQueriesPerLevel);\n+ }\n+ response = builder.execute().actionGet();\n assertMatchCount(response, numQueriesPerLevel);\n if (!onlyCount) {\n assertThat(response.getMatches().length, equalTo((int) numQueriesPerLevel));\n@@ -1075,13 +1081,15 @@ public void testPercolateSizingWithQueryAndFilter() throws Exception {\n \n for (int i = 0; i < runs; i++) {\n onlyCount = randomBoolean();\n- response = preparePercolate(client())\n+ builder = preparePercolate(client())\n .setIndices(INDEX_NAME).setDocumentType(\"my-type\")\n .setOnlyCount(onlyCount)\n .setPercolateDoc(docBuilder().setDoc(\"field\", \"value\"))\n- .setPercolateQuery(termQuery(\"level\", 1 + randomInt(numLevels - 1)))\n- .setSize((int) numQueriesPerLevel)\n- .execute().actionGet();\n+ .setPercolateQuery(termQuery(\"level\", 1 + 
randomInt(numLevels - 1)));\n+ if (!onlyCount) {\n+ builder.setSize((int) numQueriesPerLevel);\n+ }\n+ response = builder.execute().actionGet();\n assertMatchCount(response, numQueriesPerLevel);\n if (!onlyCount) {\n assertThat(response.getMatches().length, equalTo((int) numQueriesPerLevel));\n@@ -1091,13 +1099,15 @@ public void testPercolateSizingWithQueryAndFilter() throws Exception {\n for (int i = 0; i < runs; i++) {\n onlyCount = randomBoolean();\n size = randomIntBetween(0, (int) numQueriesPerLevel - 1);\n- response = preparePercolate(client())\n+ builder = preparePercolate(client())\n .setIndices(INDEX_NAME).setDocumentType(\"my-type\")\n .setOnlyCount(onlyCount)\n- .setSize(size)\n .setPercolateDoc(docBuilder().setDoc(\"field\", \"value\"))\n- .setPercolateQuery(termQuery(\"level\", 1 + randomInt(numLevels - 1)))\n- .execute().actionGet();\n+ .setPercolateQuery(termQuery(\"level\", 1 + randomInt(numLevels - 1)));\n+ if (!onlyCount) {\n+ builder.setSize(size);\n+ }\n+ response = builder.execute().actionGet();\n assertMatchCount(response, numQueriesPerLevel);\n if (!onlyCount) {\n assertThat(response.getMatches().length, equalTo(size));\n@@ -1726,7 +1736,7 @@ public void testNestedDocFilter() throws IOException {\n .setPercolateDoc(docBuilder().setDoc(doc))\n .get();\n assertMatchCount(response, 3L);\n- response = preparePercolate(client()).setScore(randomBoolean()).setSortByScore(randomBoolean()).setOnlyCount(randomBoolean()).setSize(10).setPercolateQuery(QueryBuilders.termQuery(\"text\", \"foo\"))\n+ response = preparePercolate(client()).setScore(randomBoolean()).setSortByScore(randomBoolean()).setOnlyCount(randomBoolean()).setPercolateQuery(QueryBuilders.termQuery(\"text\", \"foo\"))\n .setIndices(INDEX_NAME).setDocumentType(\"doc\")\n .setPercolateDoc(docBuilder().setDoc(doc))\n .get();", "filename": "modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorIT.java", "status": "modified" }, { "diff": "@@ -19,6 +19,7 @@\n package org.elasticsearch.test.rest.yaml.restspec;\n \n import org.elasticsearch.common.xcontent.XContentParser;\n+import org.elasticsearch.common.xcontent.json.JsonXContent;\n import org.elasticsearch.common.xcontent.yaml.YamlXContent;\n import org.elasticsearch.test.ESTestCase;\n \n@@ -71,6 +72,8 @@ public void testDuplicatePaths() throws Exception {\n }\n \n public void testDuplicateParts() throws Exception {\n+ assumeFalse(\"Test only makes sense if JSON parser doesn't have strict duplicate checks enabled\",\n+ JsonXContent.isStrictDuplicateDetectionEnabled());\n parseAndExpectFailure(\"{\\n\" +\n \" \\\"ping\\\": {\" +\n \" \\\"documentation\\\": \\\"http://www.elasticsearch.org/guide/\\\",\" +\n@@ -103,6 +106,8 @@ public void testDuplicateParts() throws Exception {\n }\n \n public void testDuplicateParams() throws Exception {\n+ assumeFalse(\"Test only makes sense if JSON parser doesn't have strict duplicate checks enabled\",\n+ JsonXContent.isStrictDuplicateDetectionEnabled());\n parseAndExpectFailure(\"{\\n\" +\n \" \\\"ping\\\": {\" +\n \" \\\"documentation\\\": \\\"http://www.elasticsearch.org/guide/\\\",\" +", "filename": "test/framework/src/test/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiParserFailingTests.java", "status": "modified" } ] }
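The test changes above all skip themselves when `JsonXContent.isStrictDuplicateDetectionEnabled()` is true, and the migration note states that 6.0 rejects duplicate JSON keys outright. As a minimal sketch of the underlying mechanism (assuming Jackson 2.x on the classpath; the class name and sample document are illustrative, not Elasticsearch code), strict duplicate detection is a parser feature that fails the parse as soon as the second occurrence of a key is read:

```java
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonProcessingException;

public class DuplicateKeyCheck {
    public static void main(String[] args) throws Exception {
        JsonFactory factory = new JsonFactory();
        // the Jackson feature that the isStrictDuplicateDetectionEnabled() guard reflects
        factory.enable(JsonParser.Feature.STRICT_DUPLICATE_DETECTION);
        String json = "{ \"field\" : \"f1\", \"field\" : \"f2\" }";
        try (JsonParser parser = factory.createParser(json)) {
            while (parser.nextToken() != null) {
                // walk the token stream; the duplicate "field" key throws here
            }
            System.out.println("parsed without error");
        } catch (JsonProcessingException e) {
            System.out.println("rejected duplicate key: " + e.getOriginalMessage());
        }
    }
}
```

With the feature enabled, the duplicate `"field"` key raises a parse exception instead of silently overwriting the first value, which is why tests that deliberately feed duplicate keys now need the `assumeFalse` guard.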
{ "body": "While `NodeService.info` is careful about not dereferencing objects that may be null, `NodeService.stats` is not. It appears to be causing NPEs on nodes that have http disabled, since the `httpServer` variable in `null`.", "comments": [], "number": 22058, "title": "NodeService.stats may hit NPEs" }
{ "body": "This commit adds safety against an NPE if HTTP stats are requested but\r\nHTTP is disabled on a node.\r\n\r\nCloses #22058", "number": 22060, "review_comments": [], "title": "Avoid NPE in NodeService#stats if HTTP is disabled" }
{ "commits": [ { "message": "Avoid NPE in NodeService#stats if HTTP is disabled\n\nThis commit adds safety against an NPE if HTTP stats are requested but\nHTTP is disabled on a node." }, { "message": "Add license header for NodeServiceTests.java\n\nThis commit adds a license header for NodeServiceTests.java." }, { "message": "Merge branch 'master' into node-service-is-bad-and-should-feel-bad\n\n* master:\n Skip IP range query REST test prior to 5.1.2\n Bump version to 5.1.2\n Don't allow yaml tests with `warnings` that don't skip `warnings` (#21989)\n Cannot force allocate primary to a node where the shard already exists (#22031)\n Fix REST test for ip range aggregations.\n Build: NORELEASE is the same as norelease (#22006)\n S3/Azure snapshot repo documentation wrong for \"read_only\"" } ], "files": [ { "diff": "@@ -19,9 +19,6 @@\n \n package org.elasticsearch.node.service;\n \n-import java.io.Closeable;\n-import java.io.IOException;\n-\n import org.elasticsearch.Build;\n import org.elasticsearch.Version;\n import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;\n@@ -44,6 +41,9 @@\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.TransportService;\n \n+import java.io.Closeable;\n+import java.io.IOException;\n+\n public class NodeService extends AbstractComponent implements Closeable {\n \n private final ThreadPool threadPool;\n@@ -111,7 +111,7 @@ public NodeStats stats(CommonStatsFlags indices, boolean os, boolean process, bo\n threadPool ? this.threadPool.stats() : null,\n fs ? monitorService.fsService().stats() : null,\n transport ? transportService.stats() : null,\n- http ? httpServer.stats() : null,\n+ http ? (httpServer == null ? null : httpServer.stats()) : null,\n circuitBreaker ? circuitBreakerService.stats() : null,\n script ? scriptService.stats() : null,\n discoveryStats ? discovery.stats() : null,\n@@ -127,4 +127,5 @@ public IngestService getIngestService() {\n public void close() throws IOException {\n indicesService.close();\n }\n+\n }", "filename": "core/src/main/java/org/elasticsearch/node/service/NodeService.java", "status": "modified" }, { "diff": "@@ -0,0 +1,43 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.node.service;\n+\n+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;\n+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;\n+import org.elasticsearch.common.network.NetworkModule;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.test.ESSingleNodeTestCase;\n+\n+import static org.hamcrest.Matchers.hasSize;\n+\n+public class NodeServiceTests extends ESSingleNodeTestCase {\n+\n+ @Override\n+ protected Settings nodeSettings() {\n+ return Settings.builder().put(NetworkModule.HTTP_ENABLED.getKey(), false).build();\n+ }\n+\n+ public void testHttpServerDisabled() {\n+ // test for a bug where if HTTP stats were requested but HTTP was disabled, NodeService would hit a NullPointerException\n+ NodesStatsResponse response = client().admin().cluster().nodesStats(new NodesStatsRequest().http(true)).actionGet();\n+ assertThat(response.getNodes(), hasSize(1));\n+ }\n+\n+}", "filename": "core/src/test/java/org/elasticsearch/node/service/NodeServiceTests.java", "status": "added" } ] }
{ "body": "Obviously the following needs work (porting from 1.x)... But this error is just unreal. I'm trying to provide a default value for a terms field: https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html and https://www.elastic.co/guide/en/elasticsearch/reference/current/boolean.html\r\n\r\n```\r\nPUT test \r\n{\r\n \"mappings\": {\r\n \"test\": {\r\n \"properties\": {\r\n \"is_first_occurrence\": {\r\n \"type\": \"boolean\"\r\n }\r\n }\r\n }\r\n }\r\n}\r\n\r\nPOST /test/test/_search\r\n{\r\n \"size\": 0,\r\n \"aggs\": {\r\n \"terms_is_first_occurrence\": {\r\n \"terms\": {\r\n \"field\": \"is_first_occurrence\",\r\n \"missing\": \"false\"\r\n }\r\n }\r\n }\r\n}\r\n```\r\n\r\nReturns the following error a very large percentage of the time, otherwise it succeeds\r\n```json\r\n {\r\n \"error\": {\r\n \"root_cause\": [\r\n {\r\n \"type\": \"illegal_argument_exception\",\r\n \"reason\": \"Field [is_first_occurrence] of type [boolean] does not support custom time zones\"\r\n }\r\n ],\r\n \"type\": \"search_phase_execution_exception\",\r\n \"reason\": \"all shards failed\",\r\n \"phase\": \"query\",\r\n \"grouped\": true,\r\n \"failed_shards\": [\r\n {\r\n \"shard\": 0,\r\n \"index\": \"test\",\r\n \"node\": \"qCyn3KxuRCS2cUBxDMbz2g\",\r\n \"reason\": {\r\n \"type\": \"illegal_argument_exception\",\r\n \"reason\": \"Field [is_first_occurrence] of type [boolean] does not support custom time zones\"\r\n }\r\n }\r\n ],\r\n \"caused_by\": {\r\n \"type\": \"illegal_argument_exception\",\r\n \"reason\": \"Field [is_first_occurrence] of type [boolean] does not support custom time zones\"\r\n }\r\n },\r\n \"status\": 400\r\n}\r\n```", "comments": [ { "body": "Seems specifying any value for `missing` doesn't work. I also tried `\"include\": 1` and that blew up with \r\n\r\n```json\r\n{\r\n \"error\": {\r\n \"root_cause\": [\r\n {\r\n \"type\": \"parsing_exception\",\r\n \"reason\": \"Unexpected token VALUE_NUMBER [include] in [terms_is_first_occurrence].\",\r\n \"line\": 7,\r\n \"col\": 20\r\n }\r\n ],\r\n \"type\": \"parsing_exception\",\r\n \"reason\": \"Unexpected token VALUE_NUMBER [include] in [terms_is_first_occurrence].\",\r\n \"line\": 7,\r\n \"col\": 20\r\n },\r\n \"status\": 400\r\n}`\r\n```\r\n\r\nDoing `\"include\": true` gives\r\n\r\n```json\r\n{\r\n \"error\": {\r\n \"root_cause\": [\r\n {\r\n \"type\": \"parsing_exception\",\r\n \"reason\": \"Unexpected token VALUE_BOOLEAN [include] in [terms_is_first_occurrence].\",\r\n \"line\": 7,\r\n \"col\": 20\r\n }\r\n ],\r\n \"type\": \"parsing_exception\",\r\n \"reason\": \"Unexpected token VALUE_BOOLEAN [include] in [terms_is_first_occurrence].\",\r\n \"line\": 7,\r\n \"col\": 20\r\n },\r\n \"status\": 400\r\n}\r\n```", "created_at": "2016-12-06T21:13:12Z" }, { "body": "Thanks for the bug report. I can reproduce the issue that you describe.", "created_at": "2016-12-07T10:09:51Z" }, { "body": "Also hit this bug, we see it for text fields and boolean fields. Really confusing issue.", "created_at": "2016-12-07T20:24:05Z" }, { "body": "Hit this bug as well. 
We discovered it does not throw an error with `keywordField` types.\r\n\r\n \"aggregations\" : {\r\n \"statuses\" : {\r\n \"terms\" : {\r\n \"field\" : \"myIntField\",\r\n \"missing\" : \"other\"\r\n }\r\n }\r\n\r\nreturns\r\n\r\n \"error\": {\r\n \"root_cause\": [\r\n {\r\n \"type\": \"illegal_argument_exception\",\r\n \"reason\": \"Field [myIntField] of type [integer] does not support custom time zones\"\r\n }\r\n ],\r\n \"type\": \"search_phase_execution_exception\",\r\n \"reason\": \"all shards failed\",\r\n \"phase\": \"query\",\r\n \"grouped\": true,\r\n \"failed_shards\": [\r\n {\r\n \"shard\": 0,\r\n \"index\": \"production_index\",\r\n \"node\": \"myNode\",\r\n \"reason\": {\r\n \"type\": \"illegal_argument_exception\",\r\n \"reason\": \"Field [myIntField] of type [integer] does not support custom time zones\"\r\n }\r\n }\r\n ],\r\n \"caused_by\": {\r\n \"type\": \"illegal_argument_exception\",\r\n \"reason\": \"Field [myIntField] of type [integer] does not support custom time zones\"\r\n }\r\n },\r\n \"status\": 400\r\n }", "created_at": "2016-12-07T20:39:58Z" } ], "number": 22009, "title": "5.0.2 - Terms Aggregate Field [is_first_occurrence] of type [boolean] does not support custom time zones" }
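For reference, the failing request restated through the Java API (a sketch assuming the Elasticsearch 5.x classes on the classpath; it only mirrors the REST body above and is not a workaround for the bug):

```java
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public class MissingValueTermsAgg {
    public static void main(String[] args) {
        // terms aggregation on the boolean field, bucketing docs without a value as false
        TermsAggregationBuilder terms = AggregationBuilders.terms("terms_is_first_occurrence")
                .field("is_first_occurrence")
                .missing(false);
        SearchSourceBuilder source = new SearchSourceBuilder()
                .size(0)
                .aggregation(terms);
        System.out.println(source); // the equivalent JSON request body
    }
}
```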
{ "body": "This is an attempt to start moving aggs parsing to `ObjectParser`. There is\r\nstill A LOT to do, but ObjectParser is way better than the way aggregations\r\nparsing works today. For instance in most cases, we reject numbers that are\r\nprovided as strings, which we are supposed to accept since some client languages\r\n(looking at you Perl) cannot make sure to use the appropriate types.\r\n\r\nRelates to #22009", "number": 22048, "review_comments": [ { "body": "I needed to do that to be allowed to throw an IOException in lambda expressions that do the parsing.", "created_at": "2016-12-08T15:16:58Z" }, { "body": "the parsing of arrays allowed either the value directly, or an array of values, except in the case that values were represented by an object, which I think was wrong?", "created_at": "2016-12-08T15:17:38Z" }, { "body": "Ah! That makes sense. I'm happy with it.", "created_at": "2016-12-08T15:32:32Z" }, { "body": "++", "created_at": "2016-12-08T15:32:36Z" }, { "body": "++", "created_at": "2016-12-08T15:33:11Z" }, { "body": "I wish we could enforce this with checkstyle. But it doesn't support it because it doesn't look at the bytecode. I *love* having these.", "created_at": "2016-12-08T15:34:12Z" }, { "body": "But I forget to add them sometimes, obviously.", "created_at": "2016-12-08T15:34:22Z" }, { "body": "I was imagining removing this class entirely and moving the parser to a static function/`public static final ObjectParser` variable in `GeoGridAggregationBuilder` I'd have made the `addFields` method a static method on `AbstractValuesSourceParser` and probably called it something different. Or removed `AbstractValuesSourceParser` entirely and moved the static methods to some other spot. I'm not sure about that. Basically I figured we don't even need this class and should try to get away with something static in the builder.", "created_at": "2016-12-08T15:39:31Z" }, { "body": "This looks fairly similar to the one for dates above. I wonder if they should share code?\r\n\r\nI have this problem - when it looks like code is the same at first glance I have a *ton* of trouble spotting small differences, especially in code reviews. Which is why I'm more sensitive than most folks to this topic.", "created_at": "2016-12-08T15:45:09Z" }, { "body": "I think this one is a prime argument for my \"this can be a static method/field on the builder\" argument.", "created_at": "2016-12-08T15:45:48Z" }, { "body": "Can you add the `{` here? I'm super not used to `for` without it.", "created_at": "2016-12-08T15:46:38Z" }, { "body": "Here too.\r\n\r\nIt might make sense to make a method on the builder that supports a list of ranges just to make these definitions shorter. Maybe. I'm not sure.", "created_at": "2016-12-08T15:47:43Z" }, { "body": "Man, one day we'll remove `ParseFieldMatcher` and everything will be so much shorter....", "created_at": "2016-12-08T15:48:33Z" }, { "body": "You can use `ContstructingObjectParser` so you don't need to make these funny sacrifices. It is built for exactly these sorts of situations. It isn't always the right thing, but sometimes it is. Maybe here?", "created_at": "2016-12-08T15:49:55Z" }, { "body": "Here too", "created_at": "2016-12-08T15:50:35Z" }, { "body": "Here too.", "created_at": "2016-12-08T15:51:25Z" }, { "body": "OK - I've made it this far and I'm going to stop here for this round. 
I think I've left enough feedback for a lively discussion!", "created_at": "2016-12-08T15:53:43Z" }, { "body": "I wanted to do it, but the issue is that the name is not found out while parsing the aggregation but propagated from the aggregation parser of the parent aggregation. We might be able to do that once all aggs use ObjectParser and named objects, but for now I don't think it is possible?", "created_at": "2016-12-08T16:24:09Z" }, { "body": "I'm fine with that explanation. `ConstructingObjectParser` is only for cases where it actually helps.", "created_at": "2016-12-08T17:29:54Z" }, { "body": "I gave it a try.", "created_at": "2016-12-08T18:50:45Z" }, { "body": "I added a comment about it to make it hopefully less troubling: it also accepts `_time` as an alias for `_key` (which I discovered while doing these changes).", "created_at": "2016-12-08T18:51:40Z" }, { "body": "done", "created_at": "2016-12-08T18:51:51Z" }, { "body": "I'll defer that since I'd like to avoid modifying the builders whenever possible.", "created_at": "2016-12-08T18:52:13Z" }, { "body": "I'd probably stick the parser helpers at the bottom of the class.", "created_at": "2016-12-08T19:02:42Z" } ], "title": "Start using `ObjectParser` for aggs." }
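For readers unfamiliar with the target API, a schematic example of the declare-style parsing this PR moves the aggregations toward (the value class and field names are invented; only the `ObjectParser` calls mirror what the diff below relies on, and exact signatures vary between versions):

```java
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;

public class DeclarativeParsingExample {

    /** Invented value class; in the PR the equivalents are the aggregation builders. */
    public static class TermsOptions {
        String field;
        int size = 10;

        void setField(String field) { this.field = field; }
        void setSize(int size) { this.size = size; }
    }

    static final ObjectParser<TermsOptions, ParseFieldMatcherSupplier> PARSER =
            new ObjectParser<>("terms_options", TermsOptions::new);
    static {
        PARSER.declareString(TermsOptions::setField, new ParseField("field"));
        // declared as an int, but lenient coercion also accepts "10" as a string,
        // which is the behaviour the PR description wants for clients like Perl
        PARSER.declareInt(TermsOptions::setSize, new ParseField("size"));
    }

    public static TermsOptions parse(XContentParser parser) throws IOException {
        // ParseFieldMatcherSupplier is a single-method interface here, so a lambda works
        return PARSER.parse(parser, () -> ParseFieldMatcher.STRICT);
    }
}
```

Unknown keys are rejected up front with the parser's name in the error message, instead of being handled by hand-written token loops.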
{ "commits": [ { "message": "Start using `ObjectParser` for aggs.\n\nThis is an attempt to start moving aggs parsing to `ObjectParser`. There is\nstill A LOT to do, but ObjectParser is way better than the way aggregations\nparsing works today. For instance in most cases, we reject numbers that are\nprovided as strings, which we are supposed to accept since some client languages\n(looking at you Perl) cannot make sure to use the appropriate types.\n\nRelates to #22009" }, { "message": "iter" }, { "message": "iter" } ], "files": [ { "diff": "@@ -29,6 +29,7 @@\n import org.elasticsearch.common.joda.Joda;\n import org.elasticsearch.common.settings.Setting;\n import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.xcontent.ContextParser;\n import org.elasticsearch.common.xcontent.ObjectParser;\n import org.elasticsearch.common.xcontent.ToXContent;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n@@ -43,7 +44,6 @@\n import java.util.List;\n import java.util.Objects;\n import java.util.concurrent.TimeUnit;\n-import java.util.function.BiFunction;\n \n /**\n * A collection of tombstones for explicitly marking indices as deleted in the cluster state.\n@@ -367,7 +367,7 @@ public static final class Tombstone implements ToXContent, Writeable {\n TOMBSTONE_PARSER.declareString((b, s) -> {}, new ParseField(DELETE_DATE_KEY));\n }\n \n- static BiFunction<XContentParser, ParseFieldMatcherSupplier, Tombstone> getParser() {\n+ static ContextParser<ParseFieldMatcherSupplier, Tombstone> getParser() {\n return (p, c) -> TOMBSTONE_PARSER.apply(p, c).build();\n }\n ", "filename": "core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java", "status": "modified" }, { "diff": "@@ -35,14 +35,7 @@\n * Superclass for {@link ObjectParser} and {@link ConstructingObjectParser}. Defines most of the \"declare\" methods so they can be shared.\n */\n public abstract class AbstractObjectParser<Value, Context extends ParseFieldMatcherSupplier>\n- implements BiFunction<XContentParser, Context, Value> {\n- /**\n- * Reads an object from a parser using some context.\n- */\n- @FunctionalInterface\n- public interface ContextParser<Context, T> {\n- T parse(XContentParser p, Context c) throws IOException;\n- }\n+ implements BiFunction<XContentParser, Context, Value>, ContextParser<Context, Value> {\n \n /**\n * Reads an object right from the parser without any context.\n@@ -54,7 +47,7 @@ public interface NoContextParser<T> {\n \n /**\n * Declare some field. 
Usually it is easier to use {@link #declareString(BiConsumer, ParseField)} or\n- * {@link #declareObject(BiConsumer, BiFunction, ParseField)} rather than call this directly.\n+ * {@link #declareObject(BiConsumer, ContextParser, ParseField)} rather than call this directly.\n */\n public abstract <T> void declareField(BiConsumer<Value, T> consumer, ContextParser<Context, T> parser, ParseField parseField,\n ValueType type);\n@@ -66,8 +59,8 @@ public <T> void declareField(BiConsumer<Value, T> consumer, NoContextParser<T> p\n declareField(consumer, (p, c) -> parser.parse(p), parseField, type);\n }\n \n- public <T> void declareObject(BiConsumer<Value, T> consumer, BiFunction<XContentParser, Context, T> objectParser, ParseField field) {\n- declareField(consumer, (p, c) -> objectParser.apply(p, c), field, ValueType.OBJECT);\n+ public <T> void declareObject(BiConsumer<Value, T> consumer, ContextParser<Context, T> objectParser, ParseField field) {\n+ declareField(consumer, (p, c) -> objectParser.parse(p, c), field, ValueType.OBJECT);\n }\n \n public void declareFloat(BiConsumer<Value, Float> consumer, ParseField field) {\n@@ -103,9 +96,9 @@ public void declareBoolean(BiConsumer<Value, Boolean> consumer, ParseField field\n declareField(consumer, XContentParser::booleanValue, field, ValueType.BOOLEAN);\n }\n \n- public <T> void declareObjectArray(BiConsumer<Value, List<T>> consumer, BiFunction<XContentParser, Context, T> objectParser,\n+ public <T> void declareObjectArray(BiConsumer<Value, List<T>> consumer, ContextParser<Context, T> objectParser,\n ParseField field) {\n- declareField(consumer, (p, c) -> parseArray(p, () -> objectParser.apply(p, c)), field, ValueType.OBJECT_ARRAY);\n+ declareField(consumer, (p, c) -> parseArray(p, () -> objectParser.parse(p, c)), field, ValueType.OBJECT_ARRAY);\n }\n \n public void declareStringArray(BiConsumer<Value, List<String>> consumer, ParseField field) {\n@@ -144,7 +137,7 @@ private interface IOSupplier<T> {\n }\n private static <T> List<T> parseArray(XContentParser parser, IOSupplier<T> supplier) throws IOException {\n List<T> list = new ArrayList<>();\n- if (parser.currentToken().isValue()) {\n+ if (parser.currentToken().isValue() || parser.currentToken() == XContentParser.Token.START_OBJECT) {\n list.add(supplier.get()); // single value\n } else {\n while (parser.nextToken() != XContentParser.Token.END_ARRAY) {", "filename": "core/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java", "status": "modified" }, { "diff": "@@ -140,12 +140,17 @@ public ConstructingObjectParser(String name, boolean ignoreUnknownFields, Functi\n @Override\n public Value apply(XContentParser parser, Context context) {\n try {\n- return objectParser.parse(parser, new Target(parser), context).finish();\n+ return parse(parser, context);\n } catch (IOException e) {\n throw new ParsingException(parser.getTokenLocation(), \"[\" + objectParser.getName() + \"] failed to parse object\", e);\n }\n }\n \n+ @Override\n+ public Value parse(XContentParser parser, Context context) throws IOException {\n+ return objectParser.parse(parser, new Target(parser), context).finish();\n+ }\n+\n /**\n * Pass the {@linkplain BiConsumer} this returns the declare methods to declare a required constructor argument. See this class's\n * javadoc for an example. 
The order in which these are declared matters: it is the order that they come in the array passed to", "filename": "core/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java", "status": "modified" }, { "diff": "@@ -0,0 +1,30 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.common.xcontent;\n+\n+import java.io.IOException;\n+\n+/**\n+ * Reads an object from a parser using some context.\n+ */\n+@FunctionalInterface\n+public interface ContextParser<Context, T> {\n+ T parse(XContentParser p, Context c) throws IOException;\n+}", "filename": "core/src/main/java/org/elasticsearch/common/xcontent/ContextParser.java", "status": "added" }, { "diff": "@@ -47,8 +47,8 @@\n \n /**\n * A declarative, stateless parser that turns XContent into setter calls. A single parser should be defined for each object being parsed,\n- * nested elements can be added via {@link #declareObject(BiConsumer, BiFunction, ParseField)} which should be satisfied where possible by\n- * passing another instance of {@link ObjectParser}, this one customized for that Object.\n+ * nested elements can be added via {@link #declareObject(BiConsumer, ContextParser, ParseField)} which should be satisfied where possible\n+ * by passing another instance of {@link ObjectParser}, this one customized for that Object.\n * <p>\n * This class works well for object that do have a constructor argument or that can be built using information available from earlier in the\n * XContent. 
For objects that have constructors with required arguments that are specified on the same level as other fields see\n@@ -126,6 +126,7 @@ public ObjectParser(String name, boolean ignoreUnknownFields, @Nullable Supplier\n * @return a new value instance drawn from the provided value supplier on {@link #ObjectParser(String, Supplier)}\n * @throws IOException if an IOException occurs.\n */\n+ @Override\n public Value parse(XContentParser parser, Context context) throws IOException {\n if (valueSupplier == null) {\n throw new NullPointerException(\"valueSupplier is not set\");\n@@ -463,6 +464,7 @@ public enum ValueType {\n OBJECT_ARRAY(START_OBJECT, START_ARRAY),\n OBJECT_OR_BOOLEAN(START_OBJECT, VALUE_BOOLEAN),\n OBJECT_OR_STRING(START_OBJECT, VALUE_STRING),\n+ OBJECT_ARRAY_OR_STRING(START_OBJECT, START_ARRAY, VALUE_STRING),\n VALUE(VALUE_BOOLEAN, VALUE_NULL, VALUE_EMBEDDED_OBJECT, VALUE_NUMBER, VALUE_STRING);\n \n private final EnumSet<XContentParser.Token> tokens;", "filename": "core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java", "status": "modified" }, { "diff": "@@ -25,15 +25,14 @@\n import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n+import org.elasticsearch.common.xcontent.ContextParser;\n import org.elasticsearch.common.xcontent.ObjectParser;\n import org.elasticsearch.common.xcontent.ToXContent;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentHelper;\n-import org.elasticsearch.common.xcontent.XContentParser;\n \n import java.io.IOException;\n import java.util.Map;\n-import java.util.function.BiFunction;\n \n /**\n * Encapsulates a pipeline's id and configuration as a blob\n@@ -55,7 +54,7 @@ public static PipelineConfiguration readPipelineConfiguration(StreamInput in) th\n }, new ParseField(\"config\"), ObjectParser.ValueType.OBJECT);\n }\n \n- public static BiFunction<XContentParser, ParseFieldMatcherSupplier, PipelineConfiguration> getParser() {\n+ public static ContextParser<ParseFieldMatcherSupplier, PipelineConfiguration> getParser() {\n return (p, c) -> PARSER.apply(p ,c).build();\n }\n private static class Builder {", "filename": "core/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java", "status": "modified" }, { "diff": "@@ -103,44 +103,34 @@\n import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregationBuilder;\n import org.elasticsearch.search.aggregations.bucket.filters.InternalFilters;\n import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder;\n-import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridParser;\n import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoHashGrid;\n import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder;\n import org.elasticsearch.search.aggregations.bucket.global.InternalGlobal;\n import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;\n-import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramParser;\n import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;\n-import org.elasticsearch.search.aggregations.bucket.histogram.HistogramParser;\n import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram;\n import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;\n import 
org.elasticsearch.search.aggregations.bucket.missing.InternalMissing;\n import org.elasticsearch.search.aggregations.bucket.missing.MissingAggregationBuilder;\n-import org.elasticsearch.search.aggregations.bucket.missing.MissingParser;\n import org.elasticsearch.search.aggregations.bucket.nested.InternalNested;\n import org.elasticsearch.search.aggregations.bucket.nested.InternalReverseNested;\n import org.elasticsearch.search.aggregations.bucket.nested.NestedAggregationBuilder;\n import org.elasticsearch.search.aggregations.bucket.nested.ReverseNestedAggregationBuilder;\n import org.elasticsearch.search.aggregations.bucket.range.InternalBinaryRange;\n import org.elasticsearch.search.aggregations.bucket.range.InternalRange;\n import org.elasticsearch.search.aggregations.bucket.range.RangeAggregationBuilder;\n-import org.elasticsearch.search.aggregations.bucket.range.RangeParser;\n import org.elasticsearch.search.aggregations.bucket.range.date.DateRangeAggregationBuilder;\n-import org.elasticsearch.search.aggregations.bucket.range.date.DateRangeParser;\n import org.elasticsearch.search.aggregations.bucket.range.date.InternalDateRange;\n import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistanceAggregationBuilder;\n-import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistanceParser;\n import org.elasticsearch.search.aggregations.bucket.range.geodistance.InternalGeoDistance;\n import org.elasticsearch.search.aggregations.bucket.range.ip.IpRangeAggregationBuilder;\n-import org.elasticsearch.search.aggregations.bucket.range.ip.IpRangeParser;\n import org.elasticsearch.search.aggregations.bucket.sampler.DiversifiedAggregationBuilder;\n-import org.elasticsearch.search.aggregations.bucket.sampler.DiversifiedSamplerParser;\n import org.elasticsearch.search.aggregations.bucket.sampler.InternalSampler;\n import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder;\n import org.elasticsearch.search.aggregations.bucket.sampler.UnmappedSampler;\n import org.elasticsearch.search.aggregations.bucket.significant.SignificantLongTerms;\n import org.elasticsearch.search.aggregations.bucket.significant.SignificantStringTerms;\n import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsAggregationBuilder;\n-import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsParser;\n import org.elasticsearch.search.aggregations.bucket.significant.UnmappedSignificantTerms;\n import org.elasticsearch.search.aggregations.bucket.significant.heuristics.ChiSquare;\n import org.elasticsearch.search.aggregations.bucket.significant.heuristics.GND;\n@@ -154,30 +144,21 @@\n import org.elasticsearch.search.aggregations.bucket.terms.LongTerms;\n import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;\n import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;\n-import org.elasticsearch.search.aggregations.bucket.terms.TermsParser;\n import org.elasticsearch.search.aggregations.bucket.terms.UnmappedTerms;\n import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;\n-import org.elasticsearch.search.aggregations.metrics.avg.AvgParser;\n import org.elasticsearch.search.aggregations.metrics.avg.InternalAvg;\n import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityAggregationBuilder;\n-import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityParser;\n import 
org.elasticsearch.search.aggregations.metrics.cardinality.InternalCardinality;\n import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBoundsAggregationBuilder;\n-import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBoundsParser;\n import org.elasticsearch.search.aggregations.metrics.geobounds.InternalGeoBounds;\n import org.elasticsearch.search.aggregations.metrics.geocentroid.GeoCentroidAggregationBuilder;\n-import org.elasticsearch.search.aggregations.metrics.geocentroid.GeoCentroidParser;\n import org.elasticsearch.search.aggregations.metrics.geocentroid.InternalGeoCentroid;\n import org.elasticsearch.search.aggregations.metrics.max.InternalMax;\n import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;\n-import org.elasticsearch.search.aggregations.metrics.max.MaxParser;\n import org.elasticsearch.search.aggregations.metrics.min.InternalMin;\n import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder;\n-import org.elasticsearch.search.aggregations.metrics.min.MinParser;\n import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanksAggregationBuilder;\n-import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanksParser;\n import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesAggregationBuilder;\n-import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesParser;\n import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.InternalHDRPercentileRanks;\n import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.InternalHDRPercentiles;\n import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.InternalTDigestPercentileRanks;\n@@ -186,18 +167,14 @@\n import org.elasticsearch.search.aggregations.metrics.scripted.ScriptedMetricAggregationBuilder;\n import org.elasticsearch.search.aggregations.metrics.stats.InternalStats;\n import org.elasticsearch.search.aggregations.metrics.stats.StatsAggregationBuilder;\n-import org.elasticsearch.search.aggregations.metrics.stats.StatsParser;\n import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStatsAggregationBuilder;\n-import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStatsParser;\n import org.elasticsearch.search.aggregations.metrics.stats.extended.InternalExtendedStats;\n import org.elasticsearch.search.aggregations.metrics.sum.InternalSum;\n import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder;\n-import org.elasticsearch.search.aggregations.metrics.sum.SumParser;\n import org.elasticsearch.search.aggregations.metrics.tophits.InternalTopHits;\n import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregationBuilder;\n import org.elasticsearch.search.aggregations.metrics.valuecount.InternalValueCount;\n import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountAggregationBuilder;\n-import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountParser;\n import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue;\n import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;\n import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue;\n@@ -372,34 +349,34 @@ public AggregatorParsers getAggregatorParsers() {\n }\n \n private void registerAggregations(List<SearchPlugin> plugins) {\n- registerAggregation(new AggregationSpec(AvgAggregationBuilder.NAME, AvgAggregationBuilder::new, new AvgParser())\n+ 
registerAggregation(new AggregationSpec(AvgAggregationBuilder.NAME, AvgAggregationBuilder::new, AvgAggregationBuilder::parse)\n .addResultReader(InternalAvg::new));\n- registerAggregation(new AggregationSpec(SumAggregationBuilder.NAME, SumAggregationBuilder::new, new SumParser())\n+ registerAggregation(new AggregationSpec(SumAggregationBuilder.NAME, SumAggregationBuilder::new, SumAggregationBuilder::parse)\n .addResultReader(InternalSum::new));\n- registerAggregation(new AggregationSpec(MinAggregationBuilder.NAME, MinAggregationBuilder::new, new MinParser())\n+ registerAggregation(new AggregationSpec(MinAggregationBuilder.NAME, MinAggregationBuilder::new, MinAggregationBuilder::parse)\n .addResultReader(InternalMin::new));\n- registerAggregation(new AggregationSpec(MaxAggregationBuilder.NAME, MaxAggregationBuilder::new, new MaxParser())\n+ registerAggregation(new AggregationSpec(MaxAggregationBuilder.NAME, MaxAggregationBuilder::new, MaxAggregationBuilder::parse)\n .addResultReader(InternalMax::new));\n- registerAggregation(new AggregationSpec(StatsAggregationBuilder.NAME, StatsAggregationBuilder::new, new StatsParser())\n+ registerAggregation(new AggregationSpec(StatsAggregationBuilder.NAME, StatsAggregationBuilder::new, StatsAggregationBuilder::parse)\n .addResultReader(InternalStats::new));\n registerAggregation(new AggregationSpec(ExtendedStatsAggregationBuilder.NAME, ExtendedStatsAggregationBuilder::new,\n- new ExtendedStatsParser()).addResultReader(InternalExtendedStats::new));\n+ ExtendedStatsAggregationBuilder::parse).addResultReader(InternalExtendedStats::new));\n registerAggregation(new AggregationSpec(ValueCountAggregationBuilder.NAME, ValueCountAggregationBuilder::new,\n- new ValueCountParser()).addResultReader(InternalValueCount::new));\n+ ValueCountAggregationBuilder::parse).addResultReader(InternalValueCount::new));\n registerAggregation(new AggregationSpec(PercentilesAggregationBuilder.NAME, PercentilesAggregationBuilder::new,\n- new PercentilesParser())\n+ PercentilesAggregationBuilder::parse)\n .addResultReader(InternalTDigestPercentiles.NAME, InternalTDigestPercentiles::new)\n .addResultReader(InternalHDRPercentiles.NAME, InternalHDRPercentiles::new));\n registerAggregation(new AggregationSpec(PercentileRanksAggregationBuilder.NAME, PercentileRanksAggregationBuilder::new,\n- new PercentileRanksParser())\n+ PercentileRanksAggregationBuilder::parse)\n .addResultReader(InternalTDigestPercentileRanks.NAME, InternalTDigestPercentileRanks::new)\n .addResultReader(InternalHDRPercentileRanks.NAME, InternalHDRPercentileRanks::new));\n registerAggregation(new AggregationSpec(CardinalityAggregationBuilder.NAME, CardinalityAggregationBuilder::new,\n- new CardinalityParser()).addResultReader(InternalCardinality::new));\n+ CardinalityAggregationBuilder::parse).addResultReader(InternalCardinality::new));\n registerAggregation(new AggregationSpec(GlobalAggregationBuilder.NAME, GlobalAggregationBuilder::new,\n GlobalAggregationBuilder::parse).addResultReader(InternalGlobal::new));\n- registerAggregation(new AggregationSpec(MissingAggregationBuilder.NAME, MissingAggregationBuilder::new, new MissingParser())\n- .addResultReader(InternalMissing::new));\n+ registerAggregation(new AggregationSpec(MissingAggregationBuilder.NAME, MissingAggregationBuilder::new,\n+ MissingAggregationBuilder::parse).addResultReader(InternalMissing::new));\n registerAggregation(new AggregationSpec(FilterAggregationBuilder.NAME, FilterAggregationBuilder::new,\n 
FilterAggregationBuilder::parse).addResultReader(InternalFilter::new));\n registerAggregation(new AggregationSpec(FiltersAggregationBuilder.NAME, FiltersAggregationBuilder::new,\n@@ -409,42 +386,43 @@ private void registerAggregations(List<SearchPlugin> plugins) {\n .addResultReader(InternalSampler.NAME, InternalSampler::new)\n .addResultReader(UnmappedSampler.NAME, UnmappedSampler::new));\n registerAggregation(new AggregationSpec(DiversifiedAggregationBuilder.NAME, DiversifiedAggregationBuilder::new,\n- new DiversifiedSamplerParser())\n+ DiversifiedAggregationBuilder::parse)\n /* Reuses result readers from SamplerAggregator*/);\n- registerAggregation(new AggregationSpec(TermsAggregationBuilder.NAME, TermsAggregationBuilder::new, new TermsParser())\n+ registerAggregation(new AggregationSpec(TermsAggregationBuilder.NAME, TermsAggregationBuilder::new,\n+ TermsAggregationBuilder::parse)\n .addResultReader(StringTerms.NAME, StringTerms::new)\n .addResultReader(UnmappedTerms.NAME, UnmappedTerms::new)\n .addResultReader(LongTerms.NAME, LongTerms::new)\n .addResultReader(DoubleTerms.NAME, DoubleTerms::new));\n registerAggregation(new AggregationSpec(SignificantTermsAggregationBuilder.NAME, SignificantTermsAggregationBuilder::new,\n- new SignificantTermsParser(significanceHeuristicParserRegistry, queryParserRegistry))\n+ SignificantTermsAggregationBuilder.getParser(significanceHeuristicParserRegistry))\n .addResultReader(SignificantStringTerms.NAME, SignificantStringTerms::new)\n .addResultReader(SignificantLongTerms.NAME, SignificantLongTerms::new)\n .addResultReader(UnmappedSignificantTerms.NAME, UnmappedSignificantTerms::new));\n registerAggregation(new AggregationSpec(RangeAggregationBuilder.NAME, RangeAggregationBuilder::new,\n- new RangeParser()).addResultReader(InternalRange::new));\n- registerAggregation(new AggregationSpec(DateRangeAggregationBuilder.NAME, DateRangeAggregationBuilder::new, new DateRangeParser())\n- .addResultReader(InternalDateRange::new));\n- registerAggregation(new AggregationSpec(IpRangeAggregationBuilder.NAME, IpRangeAggregationBuilder::new, new IpRangeParser())\n- .addResultReader(InternalBinaryRange::new));\n- registerAggregation(new AggregationSpec(HistogramAggregationBuilder.NAME, HistogramAggregationBuilder::new, new HistogramParser())\n- .addResultReader(InternalHistogram::new));\n+ RangeAggregationBuilder::parse).addResultReader(InternalRange::new));\n+ registerAggregation(new AggregationSpec(DateRangeAggregationBuilder.NAME, DateRangeAggregationBuilder::new,\n+ DateRangeAggregationBuilder::parse).addResultReader(InternalDateRange::new));\n+ registerAggregation(new AggregationSpec(IpRangeAggregationBuilder.NAME, IpRangeAggregationBuilder::new,\n+ IpRangeAggregationBuilder::parse).addResultReader(InternalBinaryRange::new));\n+ registerAggregation(new AggregationSpec(HistogramAggregationBuilder.NAME, HistogramAggregationBuilder::new,\n+ HistogramAggregationBuilder::parse).addResultReader(InternalHistogram::new));\n registerAggregation(new AggregationSpec(DateHistogramAggregationBuilder.NAME, DateHistogramAggregationBuilder::new,\n- new DateHistogramParser()).addResultReader(InternalDateHistogram::new));\n+ DateHistogramAggregationBuilder::parse).addResultReader(InternalDateHistogram::new));\n registerAggregation(new AggregationSpec(GeoDistanceAggregationBuilder.NAME, GeoDistanceAggregationBuilder::new,\n- new GeoDistanceParser()).addResultReader(InternalGeoDistance::new));\n- registerAggregation(new AggregationSpec(GeoGridAggregationBuilder.NAME, 
GeoGridAggregationBuilder::new, new GeoHashGridParser())\n- .addResultReader(InternalGeoHashGrid::new));\n+ GeoDistanceAggregationBuilder::parse).addResultReader(InternalGeoDistance::new));\n+ registerAggregation(new AggregationSpec(GeoGridAggregationBuilder.NAME, GeoGridAggregationBuilder::new,\n+ GeoGridAggregationBuilder::parse).addResultReader(InternalGeoHashGrid::new));\n registerAggregation(new AggregationSpec(NestedAggregationBuilder.NAME, NestedAggregationBuilder::new,\n NestedAggregationBuilder::parse).addResultReader(InternalNested::new));\n registerAggregation(new AggregationSpec(ReverseNestedAggregationBuilder.NAME, ReverseNestedAggregationBuilder::new,\n ReverseNestedAggregationBuilder::parse).addResultReader(InternalReverseNested::new));\n registerAggregation(new AggregationSpec(TopHitsAggregationBuilder.NAME, TopHitsAggregationBuilder::new,\n TopHitsAggregationBuilder::parse).addResultReader(InternalTopHits::new));\n- registerAggregation(new AggregationSpec(GeoBoundsAggregationBuilder.NAME, GeoBoundsAggregationBuilder::new, new GeoBoundsParser())\n- .addResultReader(InternalGeoBounds::new));\n+ registerAggregation(new AggregationSpec(GeoBoundsAggregationBuilder.NAME, GeoBoundsAggregationBuilder::new,\n+ GeoBoundsAggregationBuilder::parse).addResultReader(InternalGeoBounds::new));\n registerAggregation(new AggregationSpec(GeoCentroidAggregationBuilder.NAME, GeoCentroidAggregationBuilder::new,\n- new GeoCentroidParser()).addResultReader(InternalGeoCentroid::new));\n+ GeoCentroidAggregationBuilder::parse).addResultReader(InternalGeoCentroid::new));\n registerAggregation(new AggregationSpec(ScriptedMetricAggregationBuilder.NAME, ScriptedMetricAggregationBuilder::new,\n ScriptedMetricAggregationBuilder::parse).addResultReader(InternalScriptedMetric::new));\n registerAggregation(new AggregationSpec(ChildrenAggregationBuilder.NAME, ChildrenAggregationBuilder::new,", "filename": "core/src/main/java/org/elasticsearch/search/SearchModule.java", "status": "modified" }, { "diff": "@@ -26,11 +26,13 @@\n import org.elasticsearch.common.geo.GeoPoint;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n+import org.elasticsearch.common.xcontent.ObjectParser;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.index.fielddata.MultiGeoPointValues;\n import org.elasticsearch.index.fielddata.SortedBinaryDocValues;\n import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;\n import org.elasticsearch.index.fielddata.SortingNumericDocValues;\n+import org.elasticsearch.index.query.QueryParseContext;\n import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;\n import org.elasticsearch.search.aggregations.AggregatorFactory;\n import org.elasticsearch.search.aggregations.InternalAggregation.Type;\n@@ -41,6 +43,7 @@\n import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;\n import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;\n import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;\n+import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper;\n import org.elasticsearch.search.aggregations.support.ValuesSourceType;\n \n import java.io.IOException;\n@@ -49,9 +52,24 @@\n public class GeoGridAggregationBuilder extends ValuesSourceAggregationBuilder<ValuesSource.GeoPoint, GeoGridAggregationBuilder> {\n public static final String NAME = \"geohash_grid\";\n private static final Type TYPE = 
new Type(NAME);\n+ public static final int DEFAULT_PRECISION = 5;\n+ public static final int DEFAULT_MAX_NUM_CELLS = 10000;\n+\n+ private static final ObjectParser<GeoGridAggregationBuilder, QueryParseContext> PARSER;\n+ static {\n+ PARSER = new ObjectParser<>(GeoGridAggregationBuilder.NAME);\n+ ValuesSourceParserHelper.declareGeoFields(PARSER, false, false);\n+ PARSER.declareInt(GeoGridAggregationBuilder::precision, GeoHashGridParams.FIELD_PRECISION);\n+ PARSER.declareInt(GeoGridAggregationBuilder::size, GeoHashGridParams.FIELD_SIZE);\n+ PARSER.declareInt(GeoGridAggregationBuilder::shardSize, GeoHashGridParams.FIELD_SHARD_SIZE);\n+ }\n+\n+ public static GeoGridAggregationBuilder parse(String aggregationName, QueryParseContext context) throws IOException {\n+ return PARSER.parse(context.parser(), new GeoGridAggregationBuilder(aggregationName), context);\n+ }\n \n- private int precision = GeoHashGridParser.DEFAULT_PRECISION;\n- private int requiredSize = GeoHashGridParser.DEFAULT_MAX_NUM_CELLS;\n+ private int precision = DEFAULT_PRECISION;\n+ private int requiredSize = DEFAULT_MAX_NUM_CELLS;\n private int shardSize = -1;\n \n public GeoGridAggregationBuilder(String name) {", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -19,12 +19,17 @@\n \n package org.elasticsearch.search.aggregations.bucket.histogram;\n \n+import org.elasticsearch.common.ParsingException;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n import org.elasticsearch.common.rounding.DateTimeUnit;\n import org.elasticsearch.common.rounding.Rounding;\n import org.elasticsearch.common.unit.TimeValue;\n+import org.elasticsearch.common.xcontent.ObjectParser;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n+import org.elasticsearch.common.xcontent.XContentParser;\n+import org.elasticsearch.common.xcontent.XContentParser.Token;\n+import org.elasticsearch.index.query.QueryParseContext;\n import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;\n import org.elasticsearch.search.aggregations.AggregatorFactory;\n import org.elasticsearch.search.aggregations.support.AggregationContext;\n@@ -34,6 +39,7 @@\n import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;\n import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;\n import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;\n+import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper;\n import org.elasticsearch.search.aggregations.support.ValuesSourceType;\n \n import java.io.IOException;\n@@ -73,6 +79,48 @@ public class DateHistogramAggregationBuilder\n DATE_FIELD_UNITS = unmodifiableMap(dateFieldUnits);\n }\n \n+ private static final ObjectParser<DateHistogramAggregationBuilder, QueryParseContext> PARSER;\n+ static {\n+ PARSER = new ObjectParser<>(DateHistogramAggregationBuilder.NAME);\n+ ValuesSourceParserHelper.declareNumericFields(PARSER, true, true, true);\n+\n+ PARSER.declareField((histogram, interval) -> {\n+ if (interval instanceof Long) {\n+ histogram.interval((long) interval);\n+ } else {\n+ histogram.dateHistogramInterval((DateHistogramInterval) interval);\n+ }\n+ }, p -> {\n+ if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) {\n+ return p.longValue();\n+ } else {\n+ return new DateHistogramInterval(p.text());\n+ }\n+ }, Histogram.INTERVAL_FIELD, 
ObjectParser.ValueType.LONG);\n+\n+ PARSER.declareField(DateHistogramAggregationBuilder::offset, p -> {\n+ if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) {\n+ return p.longValue();\n+ } else {\n+ return DateHistogramAggregationBuilder.parseStringOffset(p.text());\n+ }\n+ }, Histogram.OFFSET_FIELD, ObjectParser.ValueType.LONG);\n+\n+ PARSER.declareBoolean(DateHistogramAggregationBuilder::keyed, Histogram.KEYED_FIELD);\n+\n+ PARSER.declareLong(DateHistogramAggregationBuilder::minDocCount, Histogram.MIN_DOC_COUNT_FIELD);\n+\n+ PARSER.declareField(DateHistogramAggregationBuilder::extendedBounds, ExtendedBounds.PARSER::apply,\n+ ExtendedBounds.EXTENDED_BOUNDS_FIELD, ObjectParser.ValueType.OBJECT);\n+\n+ PARSER.declareField(DateHistogramAggregationBuilder::order, DateHistogramAggregationBuilder::parseOrder,\n+ Histogram.ORDER_FIELD, ObjectParser.ValueType.OBJECT);\n+ }\n+\n+ public static DateHistogramAggregationBuilder parse(String aggregationName, QueryParseContext context) throws IOException {\n+ return PARSER.parse(context.parser(), new DateHistogramAggregationBuilder(aggregationName), context);\n+ }\n+\n private long interval;\n private DateHistogramInterval dateHistogramInterval;\n private long offset = 0;\n@@ -322,4 +370,35 @@ protected boolean innerEquals(Object obj) {\n && Objects.equals(offset, other.offset)\n && Objects.equals(extendedBounds, other.extendedBounds);\n }\n+\n+ // similar to the parsing oh histogram orders, but also accepts _time as an alias for _key\n+ private static InternalOrder parseOrder(XContentParser parser, QueryParseContext context) throws IOException {\n+ InternalOrder order = null;\n+ Token token;\n+ String currentFieldName = null;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ currentFieldName = parser.currentName();\n+ } else if (token == XContentParser.Token.VALUE_STRING) {\n+ String dir = parser.text();\n+ boolean asc = \"asc\".equals(dir);\n+ if (!asc && !\"desc\".equals(dir)) {\n+ throw new ParsingException(parser.getTokenLocation(), \"Unknown order direction: [\" + dir\n+ + \"]. Should be either [asc] or [desc]\");\n+ }\n+ order = resolveOrder(currentFieldName, asc);\n+ }\n+ }\n+ return order;\n+ }\n+\n+ static InternalOrder resolveOrder(String key, boolean asc) {\n+ if (\"_key\".equals(key) || \"_time\".equals(key)) {\n+ return (InternalOrder) (asc ? InternalOrder.KEY_ASC : InternalOrder.KEY_DESC);\n+ }\n+ if (\"_count\".equals(key)) {\n+ return (InternalOrder) (asc ? 
InternalOrder.COUNT_ASC : InternalOrder.COUNT_DESC);\n+ }\n+ return new InternalOrder.Aggregation(key, asc);\n+ }\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -19,9 +19,16 @@\n \n package org.elasticsearch.search.aggregations.bucket.histogram;\n \n+import org.elasticsearch.common.ParseField;\n+import org.elasticsearch.common.ParseFieldMatcherSupplier;\n+import org.elasticsearch.common.ParsingException;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n+import org.elasticsearch.common.xcontent.ObjectParser;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n+import org.elasticsearch.common.xcontent.XContentParser;\n+import org.elasticsearch.common.xcontent.XContentParser.Token;\n+import org.elasticsearch.index.query.QueryParseContext;\n import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;\n import org.elasticsearch.search.aggregations.AggregatorFactory;\n import org.elasticsearch.search.aggregations.support.AggregationContext;\n@@ -31,6 +38,7 @@\n import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;\n import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;\n import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;\n+import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper;\n import org.elasticsearch.search.aggregations.support.ValuesSourceType;\n \n import java.io.IOException;\n@@ -43,6 +51,39 @@ public class HistogramAggregationBuilder\n extends ValuesSourceAggregationBuilder<ValuesSource.Numeric, HistogramAggregationBuilder> {\n public static final String NAME = InternalHistogram.TYPE.name();\n \n+ private static final ObjectParser<double[], ParseFieldMatcherSupplier> EXTENDED_BOUNDS_PARSER = new ObjectParser<>(\n+ Histogram.EXTENDED_BOUNDS_FIELD.getPreferredName(),\n+ () -> new double[]{ Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY });\n+ static {\n+ EXTENDED_BOUNDS_PARSER.declareDouble((bounds, d) -> bounds[0] = d, new ParseField(\"min\"));\n+ EXTENDED_BOUNDS_PARSER.declareDouble((bounds, d) -> bounds[1] = d, new ParseField(\"max\"));\n+ }\n+\n+ private static final ObjectParser<HistogramAggregationBuilder, QueryParseContext> PARSER;\n+ static {\n+ PARSER = new ObjectParser<>(HistogramAggregationBuilder.NAME);\n+ ValuesSourceParserHelper.declareNumericFields(PARSER, true, true, false);\n+\n+ PARSER.declareDouble(HistogramAggregationBuilder::interval, Histogram.INTERVAL_FIELD);\n+\n+ PARSER.declareDouble(HistogramAggregationBuilder::offset, Histogram.OFFSET_FIELD);\n+\n+ PARSER.declareBoolean(HistogramAggregationBuilder::keyed, Histogram.KEYED_FIELD);\n+\n+ PARSER.declareLong(HistogramAggregationBuilder::minDocCount, Histogram.MIN_DOC_COUNT_FIELD);\n+\n+ PARSER.declareField((histogram, extendedBounds) -> {\n+ histogram.extendedBounds(extendedBounds[0], extendedBounds[1]);\n+ }, EXTENDED_BOUNDS_PARSER::apply, ExtendedBounds.EXTENDED_BOUNDS_FIELD, ObjectParser.ValueType.OBJECT);\n+\n+ PARSER.declareField(HistogramAggregationBuilder::order, HistogramAggregationBuilder::parseOrder,\n+ Histogram.ORDER_FIELD, ObjectParser.ValueType.OBJECT);\n+ }\n+\n+ public static HistogramAggregationBuilder parse(String aggregationName, QueryParseContext context) throws IOException {\n+ return PARSER.parse(context.parser(), new HistogramAggregationBuilder(aggregationName), context);\n+ 
}\n+\n private double interval;\n private double offset = 0;\n private double minBound = Double.POSITIVE_INFINITY;\n@@ -246,4 +287,34 @@ protected boolean innerEquals(Object obj) {\n && Objects.equals(minBound, other.minBound)\n && Objects.equals(maxBound, other.maxBound);\n }\n+\n+ private static InternalOrder parseOrder(XContentParser parser, QueryParseContext context) throws IOException {\n+ InternalOrder order = null;\n+ Token token;\n+ String currentFieldName = null;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ currentFieldName = parser.currentName();\n+ } else if (token == XContentParser.Token.VALUE_STRING) {\n+ String dir = parser.text();\n+ boolean asc = \"asc\".equals(dir);\n+ if (!asc && !\"desc\".equals(dir)) {\n+ throw new ParsingException(parser.getTokenLocation(), \"Unknown order direction: [\" + dir\n+ + \"]. Should be either [asc] or [desc]\");\n+ }\n+ order = resolveOrder(currentFieldName, asc);\n+ }\n+ }\n+ return order;\n+ }\n+\n+ static InternalOrder resolveOrder(String key, boolean asc) {\n+ if (\"_key\".equals(key)) {\n+ return (InternalOrder) (asc ? InternalOrder.KEY_ASC : InternalOrder.KEY_DESC);\n+ }\n+ if (\"_count\".equals(key)) {\n+ return (InternalOrder) (asc ? InternalOrder.COUNT_ASC : InternalOrder.COUNT_DESC);\n+ }\n+ return new InternalOrder.Aggregation(key, asc);\n+ }\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -21,7 +21,9 @@\n \n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n+import org.elasticsearch.common.xcontent.ObjectParser;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n+import org.elasticsearch.index.query.QueryParseContext;\n import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;\n import org.elasticsearch.search.aggregations.AggregatorFactory;\n import org.elasticsearch.search.aggregations.InternalAggregation.Type;\n@@ -31,6 +33,7 @@\n import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;\n import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;\n import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;\n+import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper;\n import org.elasticsearch.search.aggregations.support.ValuesSourceType;\n \n import java.io.IOException;\n@@ -39,6 +42,16 @@ public class MissingAggregationBuilder extends ValuesSourceAggregationBuilder<Va\n public static final String NAME = \"missing\";\n public static final Type TYPE = new Type(NAME);\n \n+ private static final ObjectParser<MissingAggregationBuilder, QueryParseContext> PARSER;\n+ static {\n+ PARSER = new ObjectParser<>(MissingAggregationBuilder.NAME);\n+ ValuesSourceParserHelper.declareAnyFields(PARSER, true, true);\n+ }\n+\n+ public static MissingAggregationBuilder parse(String aggregationName, QueryParseContext context) throws IOException {\n+ return PARSER.parse(context.parser(), new MissingAggregationBuilder(aggregationName, null), context);\n+ }\n+\n public MissingAggregationBuilder(String name, ValueType targetValueType) {\n super(name, TYPE, ValuesSourceType.ANY, targetValueType);\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -20,20 +20,46 @@\n 
package org.elasticsearch.search.aggregations.bucket.range;\n \n import org.elasticsearch.common.io.stream.StreamInput;\n+import org.elasticsearch.common.xcontent.ObjectParser;\n+import org.elasticsearch.common.xcontent.XContentParser;\n+import org.elasticsearch.index.query.QueryParseContext;\n import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;\n+import org.elasticsearch.search.aggregations.AggregationBuilder;\n import org.elasticsearch.search.aggregations.AggregatorFactory;\n import org.elasticsearch.search.aggregations.InternalAggregation.Type;\n import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator.Range;\n import org.elasticsearch.search.aggregations.support.AggregationContext;\n import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;\n import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;\n+import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper;\n \n import java.io.IOException;\n \n public class RangeAggregationBuilder extends AbstractRangeBuilder<RangeAggregationBuilder, Range> {\n public static final String NAME = \"range\";\n static final Type TYPE = new Type(NAME);\n \n+ private static final ObjectParser<RangeAggregationBuilder, QueryParseContext> PARSER;\n+ static {\n+ PARSER = new ObjectParser<>(RangeAggregationBuilder.NAME);\n+ ValuesSourceParserHelper.declareNumericFields(PARSER, true, true, false);\n+ PARSER.declareBoolean(RangeAggregationBuilder::keyed, RangeAggregator.KEYED_FIELD);\n+\n+ PARSER.declareObjectArray((agg, ranges) -> {\n+ for (Range range : ranges) {\n+ agg.addRange(range);\n+ }\n+ }, RangeAggregationBuilder::parseRange, RangeAggregator.RANGES_FIELD);\n+ }\n+\n+ public static AggregationBuilder parse(String aggregationName, QueryParseContext context) throws IOException {\n+ return PARSER.parse(context.parser(), new RangeAggregationBuilder(aggregationName), context);\n+ }\n+\n+ private static Range parseRange(XContentParser parser, QueryParseContext context) throws IOException {\n+ return Range.fromXContent(parser, context.getParseFieldMatcher());\n+ }\n+\n public RangeAggregationBuilder(String name) {\n super(name, InternalRange.FACTORY);\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -20,7 +20,11 @@\n package org.elasticsearch.search.aggregations.bucket.range.date;\n \n import org.elasticsearch.common.io.stream.StreamInput;\n+import org.elasticsearch.common.xcontent.ObjectParser;\n+import org.elasticsearch.common.xcontent.XContentParser;\n+import org.elasticsearch.index.query.QueryParseContext;\n import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;\n+import org.elasticsearch.search.aggregations.AggregationBuilder;\n import org.elasticsearch.search.aggregations.AggregatorFactory;\n import org.elasticsearch.search.aggregations.InternalAggregation.Type;\n import org.elasticsearch.search.aggregations.bucket.range.AbstractRangeBuilder;\n@@ -29,6 +33,7 @@\n import org.elasticsearch.search.aggregations.support.AggregationContext;\n import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;\n import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;\n+import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper;\n import org.joda.time.DateTime;\n \n import java.io.IOException;\n@@ -37,6 +42,27 @@ public class DateRangeAggregationBuilder extends AbstractRangeBuilder<DateRangeA\n public 
static final String NAME = \"date_range\";\n static final Type TYPE = new Type(NAME);\n \n+ private static final ObjectParser<DateRangeAggregationBuilder, QueryParseContext> PARSER;\n+ static {\n+ PARSER = new ObjectParser<>(DateRangeAggregationBuilder.NAME);\n+ ValuesSourceParserHelper.declareNumericFields(PARSER, true, true, true);\n+ PARSER.declareBoolean(DateRangeAggregationBuilder::keyed, RangeAggregator.KEYED_FIELD);\n+\n+ PARSER.declareObjectArray((agg, ranges) -> {\n+ for (Range range : ranges) {\n+ agg.addRange(range);\n+ }\n+ }, DateRangeAggregationBuilder::parseRange, RangeAggregator.RANGES_FIELD);\n+ }\n+\n+ public static AggregationBuilder parse(String aggregationName, QueryParseContext context) throws IOException {\n+ return PARSER.parse(context.parser(), new DateRangeAggregationBuilder(aggregationName), context);\n+ }\n+\n+ private static Range parseRange(XContentParser parser, QueryParseContext context) throws IOException {\n+ return Range.fromXContent(parser, context.getParseFieldMatcher());\n+ }\n+\n public DateRangeAggregationBuilder(String name) {\n super(name, InternalDateRange.FACTORY);\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -19,23 +19,31 @@\n \n package org.elasticsearch.search.aggregations.bucket.range.geodistance;\n \n+import org.elasticsearch.common.ParseField;\n+import org.elasticsearch.common.ParseFieldMatcher;\n+import org.elasticsearch.common.ParsingException;\n import org.elasticsearch.common.geo.GeoDistance;\n import org.elasticsearch.common.geo.GeoPoint;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n import org.elasticsearch.common.unit.DistanceUnit;\n+import org.elasticsearch.common.xcontent.ObjectParser;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n+import org.elasticsearch.common.xcontent.XContentParser;\n+import org.elasticsearch.common.xcontent.XContentParser.Token;\n+import org.elasticsearch.index.query.QueryParseContext;\n import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;\n+import org.elasticsearch.search.aggregations.AggregationBuilder;\n import org.elasticsearch.search.aggregations.AggregatorFactory;\n import org.elasticsearch.search.aggregations.InternalAggregation.Type;\n import org.elasticsearch.search.aggregations.bucket.range.InternalRange;\n import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator;\n-import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistanceParser.Range;\n import org.elasticsearch.search.aggregations.support.AggregationContext;\n import org.elasticsearch.search.aggregations.support.ValuesSource;\n import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;\n import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;\n import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;\n+import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper;\n \n import java.io.IOException;\n import java.util.ArrayList;\n@@ -45,23 +53,171 @@\n public class GeoDistanceAggregationBuilder extends ValuesSourceAggregationBuilder<ValuesSource.GeoPoint, GeoDistanceAggregationBuilder> {\n public static final String NAME = \"geo_distance\";\n public static final Type TYPE = new Type(NAME);\n+ static final ParseField ORIGIN_FIELD = new ParseField(\"origin\", \"center\", \"point\", \"por\");\n+ static 
final ParseField UNIT_FIELD = new ParseField(\"unit\");\n+ static final ParseField DISTANCE_TYPE_FIELD = new ParseField(\"distance_type\");\n \n- private final GeoPoint origin;\n+ private static final ObjectParser<GeoDistanceAggregationBuilder, QueryParseContext> PARSER;\n+ static {\n+ PARSER = new ObjectParser<>(GeoDistanceAggregationBuilder.NAME);\n+ ValuesSourceParserHelper.declareGeoFields(PARSER, true, false);\n+\n+ PARSER.declareBoolean(GeoDistanceAggregationBuilder::keyed, RangeAggregator.KEYED_FIELD);\n+\n+ PARSER.declareObjectArray((agg, ranges) -> {\n+ for (Range range : ranges) {\n+ agg.addRange(range);\n+ }\n+ }, GeoDistanceAggregationBuilder::parseRange, RangeAggregator.RANGES_FIELD);\n+\n+ PARSER.declareField(GeoDistanceAggregationBuilder::unit, p -> DistanceUnit.fromString(p.text()),\n+ UNIT_FIELD, ObjectParser.ValueType.STRING);\n+\n+ PARSER.declareField(GeoDistanceAggregationBuilder::distanceType, p -> GeoDistance.fromString(p.text()),\n+ DISTANCE_TYPE_FIELD, ObjectParser.ValueType.STRING);\n+\n+ PARSER.declareField(GeoDistanceAggregationBuilder::origin, GeoDistanceAggregationBuilder::parseGeoPoint,\n+ ORIGIN_FIELD, ObjectParser.ValueType.OBJECT_ARRAY_OR_STRING);\n+ }\n+\n+ public static AggregationBuilder parse(String aggregationName, QueryParseContext context) throws IOException {\n+ GeoDistanceAggregationBuilder builder = PARSER.parse(context.parser(), new GeoDistanceAggregationBuilder(aggregationName), context);\n+ if (builder.origin() == null) {\n+ throw new IllegalArgumentException(\"Aggregation [\" + aggregationName + \"] must define an [origin].\");\n+ }\n+ return builder;\n+ }\n+\n+ public static class Range extends RangeAggregator.Range {\n+ public Range(String key, Double from, Double to) {\n+ super(key(key, from, to), from == null ? 0 : from, to);\n+ }\n+\n+ /**\n+ * Read from a stream.\n+ */\n+ public Range(StreamInput in) throws IOException {\n+ super(in.readOptionalString(), in.readDouble(), in.readDouble());\n+ }\n+\n+ @Override\n+ public void writeTo(StreamOutput out) throws IOException {\n+ out.writeOptionalString(key);\n+ out.writeDouble(from);\n+ out.writeDouble(to);\n+ }\n+\n+ private static String key(String key, Double from, Double to) {\n+ if (key != null) {\n+ return key;\n+ }\n+ StringBuilder sb = new StringBuilder();\n+ sb.append((from == null || from == 0) ? \"*\" : from);\n+ sb.append(\"-\");\n+ sb.append((to == null || Double.isInfinite(to)) ? 
\"*\" : to);\n+ return sb.toString();\n+ }\n+ }\n+\n+ private static GeoPoint parseGeoPoint(XContentParser parser, QueryParseContext context) throws IOException {\n+ Token token = parser.currentToken();\n+ if (token == XContentParser.Token.VALUE_STRING) {\n+ GeoPoint point = new GeoPoint();\n+ point.resetFromString(parser.text());\n+ return point;\n+ }\n+ if (token == XContentParser.Token.START_ARRAY) {\n+ double lat = Double.NaN;\n+ double lon = Double.NaN;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {\n+ if (Double.isNaN(lon)) {\n+ lon = parser.doubleValue();\n+ } else if (Double.isNaN(lat)) {\n+ lat = parser.doubleValue();\n+ } else {\n+ throw new ParsingException(parser.getTokenLocation(), \"malformed [\" + ORIGIN_FIELD.getPreferredName()\n+ + \"]: a geo point array must be of the form [lon, lat]\");\n+ }\n+ }\n+ return new GeoPoint(lat, lon);\n+ }\n+ if (token == XContentParser.Token.START_OBJECT) {\n+ String currentFieldName = null;\n+ double lat = Double.NaN;\n+ double lon = Double.NaN;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ currentFieldName = parser.currentName();\n+ } else if (token == XContentParser.Token.VALUE_NUMBER) {\n+ if (\"lat\".equals(currentFieldName)) {\n+ lat = parser.doubleValue();\n+ } else if (\"lon\".equals(currentFieldName)) {\n+ lon = parser.doubleValue();\n+ }\n+ }\n+ }\n+ if (Double.isNaN(lat) || Double.isNaN(lon)) {\n+ throw new ParsingException(parser.getTokenLocation(),\n+ \"malformed [\" + currentFieldName + \"] geo point object. either [lat] or [lon] (or both) are \" + \"missing\");\n+ }\n+ return new GeoPoint(lat, lon);\n+ }\n+\n+ // should not happen since we only parse geo points when we encounter a string, an object or an array\n+ throw new IllegalArgumentException(\"Unexpected token [\" + token + \"] while parsing geo point\");\n+ }\n+\n+ private static Range parseRange(XContentParser parser, QueryParseContext context) throws IOException {\n+ ParseFieldMatcher parseFieldMatcher = context.getParseFieldMatcher();\n+ String fromAsStr = null;\n+ String toAsStr = null;\n+ double from = 0.0;\n+ double to = Double.POSITIVE_INFINITY;\n+ String key = null;\n+ String toOrFromOrKey = null;\n+ Token token;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ toOrFromOrKey = parser.currentName();\n+ } else if (token == XContentParser.Token.VALUE_NUMBER) {\n+ if (parseFieldMatcher.match(toOrFromOrKey, Range.FROM_FIELD)) {\n+ from = parser.doubleValue();\n+ } else if (parseFieldMatcher.match(toOrFromOrKey, Range.TO_FIELD)) {\n+ to = parser.doubleValue();\n+ }\n+ } else if (token == XContentParser.Token.VALUE_STRING) {\n+ if (parseFieldMatcher.match(toOrFromOrKey, Range.KEY_FIELD)) {\n+ key = parser.text();\n+ } else if (parseFieldMatcher.match(toOrFromOrKey, Range.FROM_FIELD)) {\n+ fromAsStr = parser.text();\n+ } else if (parseFieldMatcher.match(toOrFromOrKey, Range.TO_FIELD)) {\n+ toAsStr = parser.text();\n+ }\n+ }\n+ }\n+ if (fromAsStr != null || toAsStr != null) {\n+ return new Range(key, Double.parseDouble(fromAsStr), Double.parseDouble(toAsStr));\n+ } else {\n+ return new Range(key, from, to);\n+ }\n+ }\n+\n+ private GeoPoint origin;\n private List<Range> ranges = new ArrayList<>();\n private DistanceUnit unit = DistanceUnit.DEFAULT;\n private GeoDistance distanceType = GeoDistance.DEFAULT;\n private boolean keyed = false;\n \n public 
GeoDistanceAggregationBuilder(String name, GeoPoint origin) {\n this(name, origin, InternalGeoDistance.FACTORY);\n+ if (origin == null) {\n+ throw new IllegalArgumentException(\"[origin] must not be null: [\" + name + \"]\");\n+ }\n }\n \n private GeoDistanceAggregationBuilder(String name, GeoPoint origin,\n InternalRange.Factory<InternalGeoDistance.Bucket, InternalGeoDistance> rangeFactory) {\n super(name, rangeFactory.type(), rangeFactory.getValueSourceType(), rangeFactory.getValueType());\n- if (origin == null) {\n- throw new IllegalArgumentException(\"[origin] must not be null: [\" + name + \"]\");\n- }\n this.origin = origin;\n }\n \n@@ -82,6 +238,23 @@ public GeoDistanceAggregationBuilder(StreamInput in) throws IOException {\n unit = DistanceUnit.readFromStream(in);\n }\n \n+ // for parsing\n+ GeoDistanceAggregationBuilder(String name) {\n+ this(name, null, InternalGeoDistance.FACTORY);\n+ }\n+\n+ GeoDistanceAggregationBuilder origin(GeoPoint origin) {\n+ this.origin = origin;\n+ return this;\n+ }\n+\n+ /**\n+ * Return the {@link GeoPoint} that is used for distance computations.\n+ */\n+ public GeoPoint origin() {\n+ return origin;\n+ }\n+\n @Override\n protected void innerWriteTo(StreamOutput out) throws IOException {\n out.writeDouble(origin.lat());\n@@ -222,11 +395,11 @@ public boolean keyed() {\n \n @Override\n protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {\n- builder.field(GeoDistanceParser.ORIGIN_FIELD.getPreferredName(), origin);\n+ builder.field(ORIGIN_FIELD.getPreferredName(), origin);\n builder.field(RangeAggregator.RANGES_FIELD.getPreferredName(), ranges);\n builder.field(RangeAggregator.KEYED_FIELD.getPreferredName(), keyed);\n- builder.field(GeoDistanceParser.UNIT_FIELD.getPreferredName(), unit);\n- builder.field(GeoDistanceParser.DISTANCE_TYPE_FIELD.getPreferredName(), distanceType);\n+ builder.field(UNIT_FIELD.getPreferredName(), unit);\n+ builder.field(DISTANCE_TYPE_FIELD.getPreferredName(), distanceType);\n return builder;\n }\n ", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -35,7 +35,7 @@\n import org.elasticsearch.search.aggregations.bucket.range.InternalRange;\n import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator;\n import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator.Unmapped;\n-import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistanceParser.Range;\n+import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistanceAggregationBuilder.Range;\n import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;\n import org.elasticsearch.search.aggregations.support.AggregationContext;\n import org.elasticsearch.search.aggregations.support.ValuesSource;", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceRangeAggregatorFactory.java", "status": "modified" }, { "diff": "@@ -20,13 +20,21 @@\n \n import org.apache.lucene.document.InetAddressPoint;\n import org.apache.lucene.util.BytesRef;\n+import org.elasticsearch.common.ParseField;\n+import org.elasticsearch.common.ParseFieldMatcher;\n+import org.elasticsearch.common.ParsingException;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n import org.elasticsearch.common.network.InetAddresses;\n+import 
org.elasticsearch.common.xcontent.ObjectParser;\n import org.elasticsearch.common.xcontent.ToXContent;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n+import org.elasticsearch.common.xcontent.XContentParser;\n+import org.elasticsearch.common.xcontent.XContentParser.Token;\n+import org.elasticsearch.index.query.QueryParseContext;\n import org.elasticsearch.script.Script;\n import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;\n+import org.elasticsearch.search.aggregations.AggregationBuilder;\n import org.elasticsearch.search.aggregations.AggregatorFactory;\n import org.elasticsearch.search.aggregations.InternalAggregation;\n import org.elasticsearch.search.aggregations.bucket.range.BinaryRangeAggregator;\n@@ -38,6 +46,7 @@\n import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;\n import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;\n import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;\n+import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper;\n import org.elasticsearch.search.aggregations.support.ValuesSourceType;\n \n import java.io.IOException;\n@@ -53,6 +62,59 @@ public final class IpRangeAggregationBuilder\n extends ValuesSourceAggregationBuilder<ValuesSource.Bytes, IpRangeAggregationBuilder> {\n public static final String NAME = \"ip_range\";\n private static final InternalAggregation.Type TYPE = new InternalAggregation.Type(NAME);\n+ private static final ParseField MASK_FIELD = new ParseField(\"mask\");\n+\n+ private static final ObjectParser<IpRangeAggregationBuilder, QueryParseContext> PARSER;\n+ static {\n+ PARSER = new ObjectParser<>(IpRangeAggregationBuilder.NAME);\n+ ValuesSourceParserHelper.declareBytesFields(PARSER, false, false);\n+\n+ PARSER.declareBoolean(IpRangeAggregationBuilder::keyed, RangeAggregator.KEYED_FIELD);\n+\n+ PARSER.declareObjectArray((agg, ranges) -> {\n+ for (Range range : ranges) agg.addRange(range);\n+ }, IpRangeAggregationBuilder::parseRange, RangeAggregator.RANGES_FIELD);\n+ }\n+\n+ public static AggregationBuilder parse(String aggregationName, QueryParseContext context) throws IOException {\n+ return PARSER.parse(context.parser(), new IpRangeAggregationBuilder(aggregationName), context);\n+ }\n+\n+ private static Range parseRange(XContentParser parser, QueryParseContext context) throws IOException {\n+ final ParseFieldMatcher parseFieldMatcher = context.getParseFieldMatcher();\n+ String key = null;\n+ String from = null;\n+ String to = null;\n+ String mask = null;\n+\n+ if (parser.currentToken() != Token.START_OBJECT) {\n+ throw new ParsingException(parser.getTokenLocation(), \"[ranges] must contain objects, but hit a \" + parser.currentToken());\n+ }\n+ while (parser.nextToken() != Token.END_OBJECT) {\n+ if (parser.currentToken() == Token.FIELD_NAME) {\n+ continue;\n+ }\n+ if (parseFieldMatcher.match(parser.currentName(), RangeAggregator.Range.KEY_FIELD)) {\n+ key = parser.text();\n+ } else if (parseFieldMatcher.match(parser.currentName(), RangeAggregator.Range.FROM_FIELD)) {\n+ from = parser.textOrNull();\n+ } else if (parseFieldMatcher.match(parser.currentName(), RangeAggregator.Range.TO_FIELD)) {\n+ to = parser.textOrNull();\n+ } else if (parseFieldMatcher.match(parser.currentName(), MASK_FIELD)) {\n+ mask = parser.text();\n+ } else {\n+ throw new ParsingException(parser.getTokenLocation(), \"Unexpected ip range parameter: [\" + parser.currentName() + \"]\");\n+ }\n+ }\n+ if (mask != null) {\n+ if (key == null) 
{\n+ key = mask;\n+ }\n+ return new Range(key, mask);\n+ } else {\n+ return new Range(key, from, to);\n+ }\n+ }\n \n public static class Range implements ToXContent {\n private final String key;", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -21,15 +21,19 @@\n \n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n+import org.elasticsearch.common.xcontent.ObjectParser;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n+import org.elasticsearch.index.query.QueryParseContext;\n import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;\n+import org.elasticsearch.search.aggregations.AggregationBuilder;\n import org.elasticsearch.search.aggregations.AggregatorFactory;\n import org.elasticsearch.search.aggregations.InternalAggregation.Type;\n import org.elasticsearch.search.aggregations.support.AggregationContext;\n import org.elasticsearch.search.aggregations.support.ValuesSource;\n import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;\n import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;\n import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;\n+import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper;\n import org.elasticsearch.search.aggregations.support.ValuesSourceType;\n \n import java.io.IOException;\n@@ -41,6 +45,19 @@ public class DiversifiedAggregationBuilder extends ValuesSourceAggregationBuilde\n \n public static final int MAX_DOCS_PER_VALUE_DEFAULT = 1;\n \n+ private static final ObjectParser<DiversifiedAggregationBuilder, QueryParseContext> PARSER;\n+ static {\n+ PARSER = new ObjectParser<>(DiversifiedAggregationBuilder.NAME);\n+ ValuesSourceParserHelper.declareAnyFields(PARSER, true, false);\n+ PARSER.declareInt(DiversifiedAggregationBuilder::shardSize, SamplerAggregator.SHARD_SIZE_FIELD);\n+ PARSER.declareInt(DiversifiedAggregationBuilder::maxDocsPerValue, SamplerAggregator.MAX_DOCS_PER_VALUE_FIELD);\n+ PARSER.declareString(DiversifiedAggregationBuilder::executionHint, SamplerAggregator.EXECUTION_HINT_FIELD);\n+ }\n+\n+ public static AggregationBuilder parse(String aggregationName, QueryParseContext context) throws IOException {\n+ return PARSER.parse(context.parser(), new DiversifiedAggregationBuilder(aggregationName), context);\n+ }\n+\n private int shardSize = SamplerAggregationBuilder.DEFAULT_SHARD_SAMPLE_SIZE;\n private int maxDocsPerValue = MAX_DOCS_PER_VALUE_DEFAULT;\n private String executionHint = null;", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -21,14 +21,20 @@\n import org.elasticsearch.common.ParseField;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n+import org.elasticsearch.common.xcontent.ObjectParser;\n+import org.elasticsearch.common.xcontent.ParseFieldRegistry;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.index.query.QueryBuilder;\n+import org.elasticsearch.index.query.QueryParseContext;\n import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;\n+import org.elasticsearch.search.aggregations.AggregationBuilder;\n+import org.elasticsearch.search.aggregations.Aggregator;\n import 
org.elasticsearch.search.aggregations.AggregatorFactory;\n import org.elasticsearch.search.aggregations.InternalAggregation;\n import org.elasticsearch.search.aggregations.InternalAggregation.Type;\n import org.elasticsearch.search.aggregations.bucket.significant.heuristics.JLHScore;\n import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic;\n+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicParser;\n import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;\n import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator;\n import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds;\n@@ -39,6 +45,7 @@\n import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;\n import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;\n import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;\n+import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper;\n import org.elasticsearch.search.aggregations.support.ValuesSourceType;\n \n import java.io.IOException;\n@@ -55,6 +62,48 @@ public class SignificantTermsAggregationBuilder extends ValuesSourceAggregationB\n 3, 0, 10, -1);\n static final SignificanceHeuristic DEFAULT_SIGNIFICANCE_HEURISTIC = new JLHScore();\n \n+ public static Aggregator.Parser getParser(ParseFieldRegistry<SignificanceHeuristicParser> significanceHeuristicParserRegistry) {\n+ ObjectParser<SignificantTermsAggregationBuilder, QueryParseContext> parser =\n+ new ObjectParser<>(SignificantTermsAggregationBuilder.NAME);\n+ ValuesSourceParserHelper.declareAnyFields(parser, true, true);\n+\n+ parser.declareInt(SignificantTermsAggregationBuilder::shardSize, TermsAggregationBuilder.SHARD_SIZE_FIELD_NAME);\n+\n+ parser.declareLong(SignificantTermsAggregationBuilder::minDocCount, TermsAggregationBuilder.MIN_DOC_COUNT_FIELD_NAME);\n+\n+ parser.declareLong(SignificantTermsAggregationBuilder::shardMinDocCount, TermsAggregationBuilder.SHARD_MIN_DOC_COUNT_FIELD_NAME);\n+\n+ parser.declareInt(SignificantTermsAggregationBuilder::size, TermsAggregationBuilder.REQUIRED_SIZE_FIELD_NAME);\n+\n+ parser.declareString(SignificantTermsAggregationBuilder::executionHint, TermsAggregationBuilder.EXECUTION_HINT_FIELD_NAME);\n+\n+ parser.declareObject((b, v) -> { if (v.isPresent()) b.backgroundFilter(v.get()); },\n+ (p, context) -> context.parseInnerQueryBuilder(),\n+ SignificantTermsAggregationBuilder.BACKGROUND_FILTER);\n+\n+ parser.declareField((b, v) -> b.includeExclude(IncludeExclude.merge(v, b.includeExclude())),\n+ IncludeExclude::parseInclude, IncludeExclude.INCLUDE_FIELD, ObjectParser.ValueType.OBJECT_ARRAY_OR_STRING);\n+\n+ parser.declareField((b, v) -> b.includeExclude(IncludeExclude.merge(b.includeExclude(), v)),\n+ IncludeExclude::parseExclude, IncludeExclude.EXCLUDE_FIELD, ObjectParser.ValueType.STRING_ARRAY);\n+\n+ for (String name : significanceHeuristicParserRegistry.getNames()) {\n+ parser.declareObject(SignificantTermsAggregationBuilder::significanceHeuristic,\n+ (p, context) -> {\n+ SignificanceHeuristicParser significanceHeuristicParser = significanceHeuristicParserRegistry\n+ .lookupReturningNullIfNotFound(name, context.getParseFieldMatcher());\n+ return significanceHeuristicParser.parse(context);\n+ },\n+ new ParseField(name));\n+ }\n+ return new Aggregator.Parser() {\n+ @Override\n+ public AggregationBuilder parse(String aggregationName, 
QueryParseContext context) throws IOException {\n+ return parser.parse(context.parser(), new SignificantTermsAggregationBuilder(aggregationName, null), context);\n+ }\n+ };\n+ }\n+\n private IncludeExclude includeExclude = null;\n private String executionHint = null;\n private QueryBuilder filterBuilder = null;", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -26,8 +26,8 @@\n import org.elasticsearch.common.io.stream.StreamOutput;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentParser;\n+import org.elasticsearch.index.query.QueryParseContext;\n import org.elasticsearch.index.query.QueryShardException;\n-import org.elasticsearch.search.aggregations.support.XContentParseContext;\n \n import java.io.IOException;\n \n@@ -113,13 +113,13 @@ protected SignificanceHeuristic newHeuristic(boolean includeNegatives, boolean b\n }\n \n @Override\n- public SignificanceHeuristic parse(XContentParseContext context) throws IOException, QueryShardException {\n- XContentParser parser = context.getParser();\n+ public SignificanceHeuristic parse(QueryParseContext context) throws IOException, QueryShardException {\n+ XContentParser parser = context.parser();\n String givenName = parser.currentName();\n boolean backgroundIsSuperset = true;\n XContentParser.Token token = parser.nextToken();\n while (!token.equals(XContentParser.Token.END_OBJECT)) {\n- if (context.matchField(parser.currentName(), BACKGROUND_IS_SUPERSET)) {\n+ if (context.getParseFieldMatcher().match(parser.currentName(), BACKGROUND_IS_SUPERSET)) {\n parser.nextToken();\n backgroundIsSuperset = parser.booleanValue();\n } else {", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java", "status": "modified" }, { "diff": "@@ -26,8 +26,8 @@\n import org.elasticsearch.common.io.stream.StreamOutput;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentParser;\n+import org.elasticsearch.index.query.QueryParseContext;\n import org.elasticsearch.index.query.QueryShardException;\n-import org.elasticsearch.search.aggregations.support.XContentParseContext;\n \n import java.io.IOException;\n \n@@ -104,9 +104,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n return builder;\n }\n \n- public static SignificanceHeuristic parse(XContentParseContext context)\n+ public static SignificanceHeuristic parse(QueryParseContext context)\n throws IOException, QueryShardException {\n- XContentParser parser = context.getParser();\n+ XContentParser parser = context.parser();\n // move to the closing bracket\n if (!parser.nextToken().equals(XContentParser.Token.END_OBJECT)) {\n throw new ElasticsearchParseException(", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java", "status": "modified" } ] }
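The aggregation diffs above largely follow one refactoring: each standalone `*Parser` class is removed and replaced by a static `parse` method on the corresponding `*AggregationBuilder`, backed by a shared `ObjectParser`, so that `SearchModule` can register `Builder::parse` directly in each `AggregationSpec`. A minimal sketch of that pattern follows; `MyAggregationBuilder`, the `"my_agg"` key, and the `size` option are hypothetical names used only for illustration, and the Elasticsearch imports are omitted for brevity.

```java
// Sketch of the ObjectParser-based pattern used by the builders above; names are hypothetical.
public class MyAggregationBuilder extends ValuesSourceAggregationBuilder<ValuesSource.Numeric, MyAggregationBuilder> {
    public static final String NAME = "my_agg";
    private static final ParseField SIZE_FIELD = new ParseField("size");

    private static final ObjectParser<MyAggregationBuilder, QueryParseContext> PARSER;
    static {
        PARSER = new ObjectParser<>(NAME);
        // declare the shared "field"/"script"/"missing" options, as the real builders do
        ValuesSourceParserHelper.declareNumericFields(PARSER, true, true, false);
        // wire an aggregation-specific option straight to its setter
        PARSER.declareInt(MyAggregationBuilder::size, SIZE_FIELD);
    }

    // referenced from SearchModule as MyAggregationBuilder::parse
    public static MyAggregationBuilder parse(String aggregationName, QueryParseContext context) throws IOException {
        return PARSER.parse(context.parser(), new MyAggregationBuilder(aggregationName), context);
    }

    // constructor, size(int), serialization and factory methods are untouched by the refactoring
}
```

Registration then takes the same one-line shape as in the `SearchModule` diff, e.g. `new AggregationSpec(MyAggregationBuilder.NAME, MyAggregationBuilder::new, MyAggregationBuilder::parse).addResultReader(InternalMyAgg::new)` (again with hypothetical names).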
{ "body": "Failing an initializing primary when shadow replicas are enabled for the index can leave the primary unassigned with replicas being active. Instead, a replica should be promoted to primary, which is fixed by this commit.\r\n\r\nTest failure:\r\n\r\nhttps://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+5.x+multijob-unix-compatibility/os=ubuntu/324/console", "comments": [ { "body": "Thanks @bleskes ", "created_at": "2016-12-07T12:59:51Z" } ], "number": 22021, "title": "Promote shadow replica to primary when initializing primary fails" }
{ "body": "Before, it was possible that the SameShardAllocationDecider would allow\r\nforce allocation of an unassigned primary to the same node on which an\r\nactive replica is assigned. This could only happen with shadow replica\r\nindices, because when a shadow replica primary fails, the replica gets\r\npromoted to primary but in the INITIALIZED state, not in the STARTED\r\nstate (because the engine has specific reinitialization that must take\r\nplace in the case of shadow replicas). Therefore, if the now promoted\r\nprimary that is initializing fails also, the primary will be in the\r\nunassigned state, because replica to primary promotion only happens when\r\nthe failed shard was in the started state. The now unassigned primary\r\nshard will go through the allocation deciders, where the\r\nSameShardsAllocationDecider would return a NO decision, but would still\r\npermit force allocation on the primary if all deciders returned NO.\r\n\r\nThis commit implements canForceAllocatePrimary on the\r\nSameShardAllocationDecider, which ensures that a primary cannot be\r\nforce allocated to the same node on which an active replica already\r\nexists.\r\n\r\nRelates #22021 ", "number": 22031, "review_comments": [ { "body": "TestShardRouting automatically provides some of the randomness that you explicitly do here.", "created_at": "2016-12-08T15:10:18Z" } ], "title": "Cannot force allocate primary to a node where the shard already exists" }
{ "commits": [ { "message": "Cannot force allocate primary to a node where the shard already exists\n\nBefore, it was possible that the SameShardAllocationDecider would allow\nforce allocation of an unassigned primary to the same node on which an\nactive replica is assigned. This could only happen with shadow replica\nindices, because when a shadow replica primary fails, the replica gets\npromoted to primary but in the INITIALIZED state, not in the STARTED\nstate (because the engine has specific reinitialization that must take\nplace in the case of shadow replicas). Therefore, if the now promoted\nprimary that is initializing fails also, the primary will be in the\nunassigned state, because replica to primary promotion only happens when\nthe failed shard was in the started state. The now unassigned primary\nshard will go through the allocation deciders, where the\nSameShardsAllocationDecider would return a NO decision, but would still\npermit force allocation on the primary if all deciders returned NO.\n\nThis commit implements canForceAllocatePrimary on the\nSameShardAllocationDecider, which ensures that a primary cannot be\nforce allocated to the same node on which an active replica already\nexists." }, { "message": "adds a test" }, { "message": "Use TestShardRouting" } ], "files": [ { "diff": "@@ -59,6 +59,54 @@ public SameShardAllocationDecider(Settings settings) {\n @Override\n public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {\n Iterable<ShardRouting> assignedShards = allocation.routingNodes().assignedShards(shardRouting.shardId());\n+ Decision decision = decideSameNode(shardRouting, node, allocation, assignedShards);\n+ if (decision.type() == Decision.Type.NO || sameHost == false) {\n+ // if its already a NO decision looking at the node, or we aren't configured to look at the host, return the decision\n+ return decision;\n+ }\n+ if (node.node() != null) {\n+ for (RoutingNode checkNode : allocation.routingNodes()) {\n+ if (checkNode.node() == null) {\n+ continue;\n+ }\n+ // check if its on the same host as the one we want to allocate to\n+ boolean checkNodeOnSameHostName = false;\n+ boolean checkNodeOnSameHostAddress = false;\n+ if (Strings.hasLength(checkNode.node().getHostAddress()) && Strings.hasLength(node.node().getHostAddress())) {\n+ if (checkNode.node().getHostAddress().equals(node.node().getHostAddress())) {\n+ checkNodeOnSameHostAddress = true;\n+ }\n+ } else if (Strings.hasLength(checkNode.node().getHostName()) && Strings.hasLength(node.node().getHostName())) {\n+ if (checkNode.node().getHostName().equals(node.node().getHostName())) {\n+ checkNodeOnSameHostName = true;\n+ }\n+ }\n+ if (checkNodeOnSameHostAddress || checkNodeOnSameHostName) {\n+ for (ShardRouting assignedShard : assignedShards) {\n+ if (checkNode.nodeId().equals(assignedShard.currentNodeId())) {\n+ String hostType = checkNodeOnSameHostAddress ? \"address\" : \"name\";\n+ String host = checkNodeOnSameHostAddress ? 
node.node().getHostAddress() : node.node().getHostName();\n+ return allocation.decision(Decision.NO, NAME,\n+ \"the shard cannot be allocated on host %s [%s], where it already exists on node [%s]; \" +\n+ \"set [%s] to false to allow multiple nodes on the same host to hold the same shard copies\",\n+ hostType, host, node.nodeId(), CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.getKey());\n+ }\n+ }\n+ }\n+ }\n+ }\n+ return allocation.decision(Decision.YES, NAME, \"the shard does not exist on the same host\");\n+ }\n+\n+ @Override\n+ public Decision canForceAllocatePrimary(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {\n+ assert shardRouting.primary() : \"must not call force allocate on a non-primary shard\";\n+ Iterable<ShardRouting> assignedShards = allocation.routingNodes().assignedShards(shardRouting.shardId());\n+ return decideSameNode(shardRouting, node, allocation, assignedShards);\n+ }\n+\n+ private Decision decideSameNode(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation,\n+ Iterable<ShardRouting> assignedShards) {\n for (ShardRouting assignedShard : assignedShards) {\n if (node.nodeId().equals(assignedShard.currentNodeId())) {\n if (assignedShard.isSameAllocation(shardRouting)) {\n@@ -72,39 +120,6 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing\n }\n }\n }\n- if (sameHost) {\n- if (node.node() != null) {\n- for (RoutingNode checkNode : allocation.routingNodes()) {\n- if (checkNode.node() == null) {\n- continue;\n- }\n- // check if its on the same host as the one we want to allocate to\n- boolean checkNodeOnSameHostName = false;\n- boolean checkNodeOnSameHostAddress = false;\n- if (Strings.hasLength(checkNode.node().getHostAddress()) && Strings.hasLength(node.node().getHostAddress())) {\n- if (checkNode.node().getHostAddress().equals(node.node().getHostAddress())) {\n- checkNodeOnSameHostAddress = true;\n- }\n- } else if (Strings.hasLength(checkNode.node().getHostName()) && Strings.hasLength(node.node().getHostName())) {\n- if (checkNode.node().getHostName().equals(node.node().getHostName())) {\n- checkNodeOnSameHostName = true;\n- }\n- }\n- if (checkNodeOnSameHostAddress || checkNodeOnSameHostName) {\n- for (ShardRouting assignedShard : assignedShards) {\n- if (checkNode.nodeId().equals(assignedShard.currentNodeId())) {\n- String hostType = checkNodeOnSameHostAddress ? \"address\" : \"name\";\n- String host = checkNodeOnSameHostAddress ? node.node().getHostAddress() : node.node().getHostName();\n- return allocation.decision(Decision.NO, NAME,\n- \"the shard cannot be allocated on host %s [%s], where it already exists on node [%s]; \" +\n- \"set [%s] to false to allow multiple nodes on the same host to hold the same shard copies\",\n- hostType, host, node.nodeId(), CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.getKey());\n- }\n- }\n- }\n- }\n- }\n- }\n- return allocation.decision(Decision.YES, NAME, \"the shard does not exist on the same \" + (sameHost ? 
\"host\" : \"node\"));\n+ return allocation.decision(Decision.YES, NAME, \"the shard does not exist on the same node\");\n }\n }", "filename": "core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java", "status": "modified" }, { "diff": "@@ -21,19 +21,29 @@\n \n import org.apache.logging.log4j.Logger;\n import org.elasticsearch.Version;\n+import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;\n+import org.elasticsearch.cluster.ClusterInfo;\n import org.elasticsearch.cluster.ClusterName;\n import org.elasticsearch.cluster.ClusterState;\n import org.elasticsearch.cluster.ESAllocationTestCase;\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.cluster.metadata.MetaData;\n import org.elasticsearch.cluster.node.DiscoveryNode;\n import org.elasticsearch.cluster.node.DiscoveryNodes;\n+import org.elasticsearch.cluster.routing.RoutingNode;\n+import org.elasticsearch.cluster.routing.RoutingNodes;\n import org.elasticsearch.cluster.routing.RoutingTable;\n import org.elasticsearch.cluster.routing.ShardRouting;\n import org.elasticsearch.cluster.routing.ShardRoutingState;\n+import org.elasticsearch.cluster.routing.TestShardRouting;\n+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;\n+import org.elasticsearch.cluster.routing.allocation.decider.Decision;\n import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;\n import org.elasticsearch.common.logging.Loggers;\n import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.index.Index;\n+\n+import java.util.Collections;\n \n import static java.util.Collections.emptyMap;\n import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;\n@@ -86,4 +96,31 @@ public void testSameHost() {\n assertThat(shardRouting.currentNodeId(), equalTo(\"node3\"));\n }\n }\n+\n+ public void testForceAllocatePrimaryOnSameNodeNotAllowed() {\n+ SameShardAllocationDecider decider = new SameShardAllocationDecider(Settings.EMPTY);\n+ ClusterState clusterState = ClusterStateCreationUtils.state(\"idx\", randomIntBetween(2, 4), 1);\n+ Index index = clusterState.getMetaData().index(\"idx\").getIndex();\n+ ShardRouting primaryShard = clusterState.routingTable().index(index).shard(0).primaryShard();\n+ RoutingNode routingNode = clusterState.getRoutingNodes().node(primaryShard.currentNodeId());\n+ RoutingAllocation routingAllocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.emptyList()),\n+ new RoutingNodes(clusterState, false), clusterState, ClusterInfo.EMPTY, System.nanoTime(), false\n+ );\n+\n+ // can't force allocate same shard copy to the same node\n+ ShardRouting newPrimary = TestShardRouting.newShardRouting(primaryShard.shardId(), null, true, ShardRoutingState.UNASSIGNED);\n+ Decision decision = decider.canForceAllocatePrimary(newPrimary, routingNode, routingAllocation);\n+ assertEquals(Decision.Type.NO, decision.type());\n+\n+ // can force allocate to a different node\n+ RoutingNode unassignedNode = null;\n+ for (RoutingNode node : clusterState.getRoutingNodes()) {\n+ if (node.isEmpty()) {\n+ unassignedNode = node;\n+ break;\n+ }\n+ }\n+ decision = decider.canForceAllocatePrimary(newPrimary, unassignedNode, routingAllocation);\n+ assertEquals(Decision.Type.YES, decision.type());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: 5.0\r\n\r\n**Description of the problem including expected versus actual behavior**:\r\n\r\nIf I have a document with an ip address field with value of 192.168.0.255, I can query for it using CIDR 192.168.0.0/24, and get a hit on the document:\r\n```\r\n{\r\n \"query\": {\r\n \"term\": { \"address\": \"192.168.0.0/24\" }\r\n }\r\n}\r\n\r\n```\r\nHowever, if I do an IP range aggregation on that subnet, then this document is not included, i.e.:\r\n```\r\n{\r\n \"size\": 0,\r\n \"aggregations\" : {\r\n \"networks\": {\r\n \"ip_range\": {\r\n \"field\": \"addresses\",\r\n \"ranges\": [\r\n { \"mask\" : \"192.168.0.0/24\" } \r\n ]\r\n }\r\n }\r\n }\r\n}\r\n```\r\n\r\nThat is because on generic range aggregations the `to` value is exclusive, as documented here:\r\nhttps://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html\r\n\r\nIn fact, if you aggregate using a /32 CIDR, you'll never get a hit on the IP address itself, which is even more poignant, i.e., 192.168.0.1 address value will not be aggregated inside 192.168.0.1/32. The same holds if you aggregate for range from 192.168.0.1 to 192.168.0.1; such an aggregation will always have a count of 0.\r\n\r\nI suppose I can understand that `to` is exclusive for other types of ranges, but for a CIDR range I would not expect that behavior. 192.168.0.255 is definitely in the 192.168.0.0/24 subnet. I would also expect consistent behavior for querying and aggregating a subnet.\r\n\r\nThanks.", "comments": [], "number": 22005, "title": "IP Range Aggregation CIDR masks are exclusive of end of range" }
{ "body": "Closes #22005", "number": 22018, "review_comments": [], "title": "IP range masks exclude the maximum address of the range." }
{ "commits": [ { "message": "IP range masks exclude the maximum address of the range.\n\nCloses #22005" } ], "files": [ { "diff": "@@ -55,6 +55,7 @@ public final class IpRangeAggregationBuilder\n private static final InternalAggregation.Type TYPE = new InternalAggregation.Type(NAME);\n \n public static class Range implements ToXContent {\n+\n private final String key;\n private final String from;\n private final String to;\n@@ -94,8 +95,18 @@ public static class Range implements ToXContent {\n }\n this.key = key;\n try {\n- this.from = InetAddresses.toAddrString(InetAddress.getByAddress(lower));\n- this.to = InetAddresses.toAddrString(InetAddress.getByAddress(upper));\n+ InetAddress fromAddress = InetAddress.getByAddress(lower);\n+ if (fromAddress.equals(InetAddressPoint.MIN_VALUE)) {\n+ this.from = null;\n+ } else {\n+ this.from = InetAddresses.toAddrString(fromAddress);\n+ }\n+ InetAddress inclusiveToAddress = InetAddress.getByAddress(upper);\n+ if (inclusiveToAddress.equals(InetAddressPoint.MAX_VALUE)) {\n+ this.to = null;\n+ } else {\n+ this.to = InetAddresses.toAddrString(InetAddressPoint.nextUp(inclusiveToAddress));\n+ }\n } catch (UnknownHostException bogus) {\n throw new AssertionError(bogus);\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -87,4 +87,41 @@ protected IpRangeAggregationBuilder createTestAggregatorBuilder() {\n return factory;\n }\n \n+ public void testMask() {\n+ IpRangeAggregationBuilder b1 = new IpRangeAggregationBuilder(\"foo\");\n+ IpRangeAggregationBuilder b2 = new IpRangeAggregationBuilder(\"foo\");\n+ b1.addMaskRange(\"bar\", \"192.168.10.12/16\");\n+ b2.addRange(\"bar\", \"192.168.0.0\", \"192.169.0.0\");\n+ assertEquals(b1, b2);\n+\n+ b1 = new IpRangeAggregationBuilder(\"foo\");\n+ b2 = new IpRangeAggregationBuilder(\"foo\");\n+ b1.addMaskRange(\"bar\", \"192.168.0.0/31\");\n+ b2.addRange(\"bar\", \"192.168.0.0\", \"192.168.0.2\");\n+ assertEquals(b1, b2);\n+\n+ b1 = new IpRangeAggregationBuilder(\"foo\");\n+ b2 = new IpRangeAggregationBuilder(\"foo\");\n+ b1.addMaskRange(\"bar\", \"0.0.0.0/0\");\n+ b2.addRange(\"bar\", \"0.0.0.0\", \"::1:0:0:0\");\n+ assertEquals(b1, b2);\n+\n+ b1 = new IpRangeAggregationBuilder(\"foo\");\n+ b2 = new IpRangeAggregationBuilder(\"foo\");\n+ b1.addMaskRange(\"bar\", \"fe80::821f:2ff:fe4a:c5bd/64\");\n+ b2.addRange(\"bar\", \"fe80::\", \"fe80:0:0:1::\");\n+ assertEquals(b1, b2);\n+\n+ b1 = new IpRangeAggregationBuilder(\"foo\");\n+ b2 = new IpRangeAggregationBuilder(\"foo\");\n+ b1.addMaskRange(\"bar\", \"::/16\");\n+ b2.addRange(\"bar\", null, \"1::\");\n+ assertEquals(b1, b2);\n+\n+ b1 = new IpRangeAggregationBuilder(\"foo\");\n+ b2 = new IpRangeAggregationBuilder(\"foo\");\n+ b1.addMaskRange(\"bar\", \"::/0\");\n+ b2.addRange(\"bar\", null, null);\n+ assertEquals(b1, b2);\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/search/aggregations/bucket/IpRangeTests.java", "status": "modified" } ] }
{ "body": "When a primary fails, a replica shard is promoted to primary to take it's place. If the replica that is promoted resides on a node with a higher ES version than what's on nodes with other replicas, the newly promoted primary might use a new postings format or codec that is not available on the replication target nodes.\r\nPreventing a primary to be on a node with a higher version than nodes with replicas is currently done using an allocation decider (`NodeVersionAllocationDecider`). Allocation deciders are only checked though when a primary or a replica is allocated / moved, not when a replica is promoted to primary.", "comments": [ { "body": "There is another offender I think: Upon primary relocation (which can be done from an old to a new node), there is a short period in time during relocation handoff where the relocation target (on a newer node) replicates back to the relocation source (on an older node). /cc: @bleskes ", "created_at": "2016-12-06T16:22:03Z" }, { "body": "> the newly promoted primary might use a new postings format or codec that is not available on the replication target nodes.\r\n\r\nNote that is currently OK as it is only relevant when doing a recovery. However we want to make this rule stricter so it will apply at all times (and use it for other stuff).\r\n\r\n> Upon primary relocation (which can be done from an old to a new node), there is a short period in time during relocation handoff where the relocation target (on a newer node) replicates back to the relocation source (on an older node).\r\n\r\nThat's a good one. I think the period is longer though - one the primary has relocated from old to new we can end up in a situation that other replicas are on old for a long time. That part is solvable by preventing relocation of primaries from old to new if there is another replica on an old node (which will mean move the replicas first). However the scenario you describe still holds - and it gets messy. Something to think about - I don't see an immediate solution .\r\n\r\n", "created_at": "2016-12-06T19:11:32Z" }, { "body": "Replication from a newer node to an older node is ok, and difficult to avoid (e.g. primary relocation during rolling restart). The recovery is avoided as when then old primary fails, all initializing shards are failed as well. ", "created_at": "2017-06-30T10:16:15Z" } ], "number": 22002, "title": "Primary promotion violates NodeVersionAllocationDecider" }
{ "body": "When a primary shard fails, ES promotes a replica to be the new primary,\r\nhowever, if there are two replicas to choose from, one on a higher node version\r\nand one on a lower node version, promoting the higher node version could be\r\nproblematic because replicating requests could then be in a format the older\r\nversion replica cannot read.\r\n\r\nThis changes the `activeReplica` method in `RoutingNodes` to return the replica\r\non the node with the lowest ES version. This method is used to select a replica\r\nfor promotion.\r\n\r\nResolves #22002", "number": 22012, "review_comments": [ { "body": "Can you add a @link to the NodeVersionAllocationDecider?", "created_at": "2016-12-07T09:12:25Z" }, { "body": "Is it possible for `replicaNodeVersion` to be null (I don't think so)? Can you add an assertion stating != null instead of the if ( == null) check?\r\n", "created_at": "2016-12-07T09:16:47Z" }, { "body": "The 2 `if-else` conditions can then also be simplified to just:\r\n```\r\nif (lowestVersionSeen == null || replicaNodeVersion.before(lowestVersionSeen)) {\r\n lowestVersionSeen = replicaNodeVersion;\r\n candidate = shardRouting;\r\n}\r\n```", "created_at": "2016-12-07T09:20:08Z" }, { "body": "I think that the activeReplica method could also be expressed very concisely using Java 8 streams :-)", "created_at": "2016-12-07T09:22:36Z" }, { "body": "now that this method does not just select any active replica, maybe we should rename it to something that better reflects its purpose", "created_at": "2016-12-07T09:24:32Z" }, { "body": "ver -> version", "created_at": "2016-12-07T09:26:17Z" }, { "body": "> Is it possible for replicaNodeVersion to be null (I don't think so)? Can you add an assertion stating != null instead of the if ( == null) check?\r\n\r\nYes, it's possible for it to be null, I actually had an assertion in the first iteration of this but it tripped all over the place during rolling restart tests. I can investigate why though.", "created_at": "2016-12-07T16:47:11Z" } ], "title": "Promote replica on lowest version of Elasticsearch when primary fails" }
{ "commits": [ { "message": "Promote replica on lowest version of Elasticsearch when primary fails\n\nWhen a primary shard fails, ES promotes a replica to be the new primary,\nhowever, if there are two replicas to choose from, one on a higher node version\nand one on a lower node version, promoting the higher node version could be\nproblematic because replicating requests could then be in a format the older\nversion replica cannot read.\n\nThis changes the `activeReplica` method in `RoutingNodes` to return the replica\non the node with the lowest ES version. This method is used to select a replica\nfor promotion.\n\nResolves #22002" } ], "files": [ { "diff": "@@ -23,6 +23,7 @@\n import com.carrotsearch.hppc.cursors.ObjectCursor;\n import org.apache.logging.log4j.Logger;\n import org.apache.lucene.util.CollectionUtil;\n+import org.elasticsearch.Version;\n import org.elasticsearch.cluster.ClusterState;\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.cluster.metadata.MetaData;\n@@ -66,6 +67,8 @@ public class RoutingNodes implements Iterable<RoutingNode> {\n \n private final Map<String, RoutingNode> nodesToShards = new HashMap<>();\n \n+ private final Map<String, Version> nodesToVersions = new HashMap<>();\n+\n private final UnassignedShards unassignedShards = new UnassignedShards(this);\n \n private final Map<ShardId, List<ShardRouting>> assignedShards = new HashMap<>();\n@@ -93,6 +96,7 @@ public RoutingNodes(ClusterState clusterState, boolean readOnly) {\n // fill in the nodeToShards with the \"live\" nodes\n for (ObjectCursor<DiscoveryNode> cursor : clusterState.nodes().getDataNodes().values()) {\n nodesToShards.put(cursor.value.getId(), new LinkedHashMap<>()); // LinkedHashMap to preserve order\n+ nodesToVersions.put(cursor.value.getId(), cursor.value.getVersion());\n }\n \n // fill in the inverse of node -> shards allocated\n@@ -319,14 +323,26 @@ public ShardRouting activePrimary(ShardId shardId) {\n /**\n * Returns one active replica shard for the given shard id or <code>null</code> if\n * no active replica is found.\n+ *\n+ * Since replicas could possibly be on nodes with a newer version of ES than\n+ * the primary is, this will return replicas on the lowest version of ES.\n */\n public ShardRouting activeReplica(ShardId shardId) {\n+ Version lowestVersionSeen = null;\n+ ShardRouting candidate = null;\n for (ShardRouting shardRouting : assignedShards(shardId)) {\n if (!shardRouting.primary() && shardRouting.active()) {\n- return shardRouting;\n+ Version replicaNodeVersion = nodesToVersions.get(shardRouting.currentNodeId());\n+ if (replicaNodeVersion == null && candidate == null) {\n+ // Only use this replica if there are no other candidates\n+ candidate = shardRouting;\n+ } else if (lowestVersionSeen == null || replicaNodeVersion.before(lowestVersionSeen)) {\n+ lowestVersionSeen = replicaNodeVersion;\n+ candidate = shardRouting;\n+ }\n }\n }\n- return null;\n+ return candidate;\n }\n \n /**", "filename": "core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java", "status": "modified" }, { "diff": "@@ -19,12 +19,14 @@\n \n package org.elasticsearch.cluster.routing.allocation;\n \n+import com.carrotsearch.hppc.cursors.ObjectCursor;\n import org.apache.logging.log4j.Logger;\n import org.elasticsearch.Version;\n import org.elasticsearch.cluster.ClusterState;\n import org.elasticsearch.cluster.ESAllocationTestCase;\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.cluster.metadata.MetaData;\n+import 
org.elasticsearch.cluster.node.DiscoveryNode;\n import org.elasticsearch.cluster.node.DiscoveryNodes;\n import org.elasticsearch.cluster.routing.RoutingNodes;\n import org.elasticsearch.cluster.routing.RoutingTable;\n@@ -516,6 +518,84 @@ public void testFailAllReplicasInitializingOnPrimaryFail() {\n assertThat(newPrimaryShard.allocationId(), equalTo(startedReplica.allocationId()));\n }\n \n+ public void testActiveReplicasReturnsLowestVersion() {\n+ AllocationService allocation = createAllocationService(Settings.builder().build());\n+\n+ MetaData metaData = MetaData.builder().put(IndexMetaData.builder(\"test\")\n+ .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)) .build();\n+\n+ RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index(\"test\")).build();\n+\n+ ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))\n+ .metaData(metaData).routingTable(initialRoutingTable).build();\n+\n+ ShardId shardId = new ShardId(metaData.index(\"test\").getIndex(), 0);\n+\n+ // add a single nodes\n+ clusterState = ClusterState.builder(clusterState).nodes(\n+ DiscoveryNodes.builder()\n+ .add(newNode(\"node1\", Version.V_5_0_0)))\n+ .build();\n+ clusterState = ClusterState.builder(clusterState).routingTable(allocation.reroute(clusterState, \"reroute\").routingTable()).build();\n+ assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));\n+ assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2));\n+\n+ // start primary shard\n+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));\n+ assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1));\n+ assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2));\n+\n+ // add 2 more nodes\n+ if (randomBoolean()) {\n+ clusterState = ClusterState.builder(clusterState).nodes(\n+ DiscoveryNodes.builder(clusterState.nodes())\n+ .add(newNode(\"node2\", Version.V_5_0_1))\n+ .add(newNode(\"node3\", Version.V_5_0_2)))\n+ .build();\n+ } else {\n+ clusterState = ClusterState.builder(clusterState).nodes(\n+ DiscoveryNodes.builder(clusterState.nodes())\n+ .add(newNode(\"node2\", Version.V_5_0_2))\n+ .add(newNode(\"node3\", Version.V_5_0_1)))\n+ .build();\n+ }\n+\n+ // start all the replicas\n+ clusterState = allocation.reroute(clusterState, \"reroute\");\n+ assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1));\n+ assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));\n+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));\n+ assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(3));\n+ assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(0));\n+ ShardRouting startedReplica = clusterState.getRoutingNodes().activeReplica(shardId);\n+\n+ // fail the primary shard, check replicas get removed as well...\n+ ShardRouting primaryShardToFail = clusterState.routingTable().index(\"test\").shard(0).primaryShard();\n+ ClusterState newState = allocation.applyFailedShard(clusterState, primaryShardToFail);\n+ assertThat(newState, not(equalTo(clusterState)));\n+ clusterState = newState;\n+ // the primary gets allocated on another node\n+ assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), 
equalTo(2));\n+\n+ ShardRouting newPrimaryShard = clusterState.routingTable().index(\"test\").shard(0).primaryShard();\n+ assertThat(newPrimaryShard, not(equalTo(primaryShardToFail)));\n+ assertThat(newPrimaryShard.allocationId(), equalTo(startedReplica.allocationId()));\n+\n+ Version replicaNodeVersion = clusterState.nodes().getDataNodes().get(startedReplica.currentNodeId()).getVersion();\n+ assertNotNull(replicaNodeVersion);\n+ logger.info(\"--> shard {} got assigned to node with version {}\", startedReplica, replicaNodeVersion);\n+\n+ for (ObjectCursor<DiscoveryNode> cursor : clusterState.nodes().getDataNodes().values()) {\n+ if (\"node1\".equals(cursor.value.getId())) {\n+ // Skip the node that the primary was on, it doesn't have a replica so doesn't need a version check\n+ continue;\n+ }\n+ Version nodeVer = cursor.value.getVersion();\n+ assertTrue(\"expected node [\" + cursor.value.getId() + \"] with ver \" + nodeVer + \" to be after \" + replicaNodeVersion,\n+ replicaNodeVersion.onOrBefore(nodeVer));\n+ }\n+ }\n+\n public void testFailAllReplicasInitializingOnPrimaryFailWhileHavingAReplicaToElect() {\n AllocationService allocation = createAllocationService(Settings.builder()\n .build());", "filename": "core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java", "status": "modified" } ] }
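One reviewer noted that the selection rule could be expressed concisely with Java 8 streams. The standalone sketch below shows that idea; `Replica` and the integer version map are hypothetical placeholders rather than Elasticsearch's `ShardRouting`/`Version` types, and unknown node versions are simply ordered last, which loosely mirrors the "only use this replica if there are no other candidates" handling discussed in the review comments above.

```java
// Illustrative sketch of the selection rule only: among the active replica copies, promote the one
// whose node reports the lowest version, so the new primary never replicates in a format that an
// older replica cannot read. Placeholder types, not Elasticsearch classes.
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Optional;

public class LowestVersionReplicaSketch {

    record Replica(String allocationId, String nodeId) {}

    static Optional<Replica> pickReplicaToPromote(List<Replica> activeReplicas, Map<String, Integer> nodeVersions) {
        return activeReplicas.stream()
                // unknown versions sort last, so they are only chosen when nothing better exists
                .min(Comparator.comparing(r -> nodeVersions.getOrDefault(r.nodeId(), Integer.MAX_VALUE)));
    }

    public static void main(String[] args) {
        List<Replica> replicas = List.of(new Replica("r1", "node2"), new Replica("r2", "node3"));
        // e.g. node2 runs 5.0.2 and node3 runs 5.0.1 (encoded here as plain integers)
        Map<String, Integer> versions = Map.of("node2", 5_00_02, "node3", 5_00_01);
        // the replica on node3 (the older node) is selected for promotion
        System.out.println(pickReplicaToPromote(replicas, versions));
    }
}
```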
{ "body": "**Elasticsearch version**: 5.0.0\r\n\r\n**Plugins installed**: []\r\n\r\n**JVM version**: openjdk version \"1.8.0_111\"\r\nOpenJDK Runtime Environment (build 1.8.0_111-8u111-b14-2ubuntu0.16.04.2-b14)\r\nOpenJDK 64-Bit Server VM (build 25.111-b14, mixed mode)\r\n\r\n**OS version**: Ubuntu 16.04 4.4.0-31-generic #50-Ubuntu SMP Wed Jul 13 00:07:12 UTC 2016 x86_64 x86_64 x86_64 GNU/Linux\r\n\r\n**Description of the problem including expected versus actual behavior**:\r\nwhen querying for a field that is mapped as IP type, the result value is unicode garbage. \r\n\r\n**Steps to reproduce**:\r\n 1. upload mapping \r\nPOST /_template/test \r\n{\r\n \"mappings\": {\r\n \"logevent\": {\r\n \"properties\": {\r\n \"@fields\": {\r\n \"properties\": {\r\n \"IP\": {\r\n \"type\": \"ip\"\r\n }\r\n }\r\n },\r\n \"@logtype\": {\r\n \"index\": \"not_analyzed\",\r\n \"type\": \"keyword\"\r\n },\r\n \"@timestamp\": {\r\n \"format\": \"strict_date_optional_time||epoch_millis\",\r\n \"index\": \"not_analyzed\",\r\n \"type\": \"date\"\r\n }\r\n }\r\n\r\n }\r\n },\r\n \"template\": \"test-bug\"\r\n}\r\n 2. upload doc / create index\r\nPOST /test-bug/logevent/\r\n{\r\n \"@timestamp\": \"2016-12-05T06:59:41.393Z\",\r\n \"@fields\": {\r\n \"IP\": \"70.92.108.189\"\r\n }\r\n}\r\n 3. query for field\r\nPOST /test-bug/logevent/_search\r\n{\r\n \"docvalue_fields\" : [\r\n \"@timestamp\",\r\n\t\"@fields.IP\"\r\n ]\r\n}\r\n\r\n**Result:**\r\n{\r\n \"took\": 1,\r\n \"timed_out\": false,\r\n \"_shards\": {\r\n \"total\": 1,\r\n \"successful\": 1,\r\n \"failed\": 0\r\n },\r\n \"hits\": {\r\n \"total\": 1,\r\n \"max_score\": 1,\r\n \"hits\": [\r\n {\r\n \"_index\": \"test-bug\",\r\n \"_type\": \"logevent\",\r\n \"_id\": \"AVjPswaq8VThoQGWfX4A\",\r\n \"_score\": 1,\r\n \"_source\": {\r\n \"@timestamp\": \"2016-12-05T06:59:41.393Z\",\r\n \"@fields\": {\r\n \"IP\": \"70.92.108.189\"\r\n }\r\n },\r\n \"fields\": {\r\n \"@fields.IP\": [\r\n **\"\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000��l½\"**\r\n ],\r\n \"@timestamp\": [\r\n 1480921181393\r\n ]\r\n }\r\n }\r\n ]\r\n }\r\n}\r\n\r\n\r\n**Provide logs (if relevant)**:\r\n", "comments": [ { "body": "This is a bug indeed, I think we should return a formatted ip address as well as a formatted date.", "created_at": "2016-12-05T17:28:20Z" } ], "number": 21977, "title": "ip type returns garbage when queried as field" }
{ "body": "Currently we expose the internal representation that we use for ip addresses,\r\nwhich are the ipv6 bytes. However, this is not really usable, exposes internal\r\nimplementation details and also does not work fine with other APIs that expect\r\nthat the values can be `toString`'d.\r\n\r\nCloses #21977", "number": 21997, "review_comments": [], "title": "Expose `ip` fields as strings in scripts." }
{ "commits": [ { "message": "Expose `ip` fields as strings in scripts.\n\nCurrently we expose the internal representation that we use for ip addresses,\nwhich are the ipv6 bytes. However, this is not really usable, exposes internal\nimplementation details and also does not work fine with other APIs that expect\nthat the values can be `toString`'d.\n\nCloses #21977" } ], "files": [ { "diff": "@@ -31,13 +31,15 @@\n import org.elasticsearch.index.IndexSettings;\n import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;\n import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;\n+import org.elasticsearch.index.fielddata.ScriptDocValues;\n import org.elasticsearch.index.fielddata.plain.AbstractAtomicOrdinalsFieldData;\n import org.elasticsearch.indices.breaker.CircuitBreakerService;\n \n import java.io.IOException;\n import java.util.Collection;\n import java.util.Collections;\n import java.util.concurrent.TimeUnit;\n+import java.util.function.Function;\n \n /**\n * Utility class to build global ordinals.\n@@ -48,7 +50,9 @@ public enum GlobalOrdinalsBuilder {\n /**\n * Build global ordinals for the provided {@link IndexReader}.\n */\n- public static IndexOrdinalsFieldData build(final IndexReader indexReader, IndexOrdinalsFieldData indexFieldData, IndexSettings indexSettings, CircuitBreakerService breakerService, Logger logger) throws IOException {\n+ public static IndexOrdinalsFieldData build(final IndexReader indexReader, IndexOrdinalsFieldData indexFieldData,\n+ IndexSettings indexSettings, CircuitBreakerService breakerService, Logger logger,\n+ Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction) throws IOException {\n assert indexReader.leaves().size() > 1;\n long startTimeNS = System.nanoTime();\n \n@@ -71,7 +75,7 @@ public static IndexOrdinalsFieldData build(final IndexReader indexReader, IndexO\n );\n }\n return new InternalGlobalOrdinalsIndexFieldData(indexSettings, indexFieldData.getFieldName(),\n- atomicFD, ordinalMap, memorySizeInBytes\n+ atomicFD, ordinalMap, memorySizeInBytes, scriptFunction\n );\n }\n \n@@ -81,7 +85,7 @@ public static IndexOrdinalsFieldData buildEmpty(IndexSettings indexSettings, fin\n final AtomicOrdinalsFieldData[] atomicFD = new AtomicOrdinalsFieldData[indexReader.leaves().size()];\n final RandomAccessOrds[] subs = new RandomAccessOrds[indexReader.leaves().size()];\n for (int i = 0; i < indexReader.leaves().size(); ++i) {\n- atomicFD[i] = new AbstractAtomicOrdinalsFieldData() {\n+ atomicFD[i] = new AbstractAtomicOrdinalsFieldData(AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION) {\n @Override\n public RandomAccessOrds getOrdinalsValues() {\n return DocValues.emptySortedSet();\n@@ -105,7 +109,7 @@ public void close() {\n }\n final OrdinalMap ordinalMap = OrdinalMap.build(null, subs, PackedInts.DEFAULT);\n return new InternalGlobalOrdinalsIndexFieldData(indexSettings, indexFieldData.getFieldName(),\n- atomicFD, ordinalMap, 0\n+ atomicFD, ordinalMap, 0, AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION\n );\n }\n ", "filename": "core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java", "status": "modified" }, { "diff": "@@ -24,23 +24,28 @@\n import org.apache.lucene.util.Accountable;\n import org.elasticsearch.index.IndexSettings;\n import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;\n+import org.elasticsearch.index.fielddata.ScriptDocValues;\n import org.elasticsearch.index.fielddata.plain.AbstractAtomicOrdinalsFieldData;\n \n import java.util.Collection;\n+import 
java.util.function.Function;\n \n /**\n * {@link org.elasticsearch.index.fielddata.IndexFieldData} impl based on global ordinals.\n */\n final class InternalGlobalOrdinalsIndexFieldData extends GlobalOrdinalsIndexFieldData {\n \n private final Atomic[] atomicReaders;\n+ private final Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction;\n \n- InternalGlobalOrdinalsIndexFieldData(IndexSettings indexSettings, String fieldName, AtomicOrdinalsFieldData[] segmentAfd, OrdinalMap ordinalMap, long memorySizeInBytes) {\n+ InternalGlobalOrdinalsIndexFieldData(IndexSettings indexSettings, String fieldName, AtomicOrdinalsFieldData[] segmentAfd,\n+ OrdinalMap ordinalMap, long memorySizeInBytes, Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction) {\n super(indexSettings, fieldName, memorySizeInBytes);\n this.atomicReaders = new Atomic[segmentAfd.length];\n for (int i = 0; i < segmentAfd.length; i++) {\n atomicReaders[i] = new Atomic(segmentAfd[i], ordinalMap, i);\n }\n+ this.scriptFunction = scriptFunction;\n }\n \n @Override\n@@ -55,6 +60,7 @@ private final class Atomic extends AbstractAtomicOrdinalsFieldData {\n private final int segmentIndex;\n \n private Atomic(AtomicOrdinalsFieldData afd, OrdinalMap ordinalMap, int segmentIndex) {\n+ super(scriptFunction);\n this.afd = afd;\n this.ordinalMap = ordinalMap;\n this.segmentIndex = segmentIndex;", "filename": "core/src/main/java/org/elasticsearch/index/fielddata/ordinals/InternalGlobalOrdinalsIndexFieldData.java", "status": "modified" }, { "diff": "@@ -29,13 +29,24 @@\n \n import java.util.Collection;\n import java.util.Collections;\n+import java.util.function.Function;\n \n \n public abstract class AbstractAtomicOrdinalsFieldData implements AtomicOrdinalsFieldData {\n \n+ public static final Function<RandomAccessOrds, ScriptDocValues<?>> DEFAULT_SCRIPT_FUNCTION =\n+ ((Function<RandomAccessOrds, SortedBinaryDocValues>) FieldData::toString)\n+ .andThen(ScriptDocValues.Strings::new);\n+\n+ private final Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction;\n+\n+ protected AbstractAtomicOrdinalsFieldData(Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction) {\n+ this.scriptFunction = scriptFunction;\n+ }\n+\n @Override\n- public final ScriptDocValues getScriptValues() {\n- return new ScriptDocValues.Strings(getBytesValues());\n+ public final ScriptDocValues<?> getScriptValues() {\n+ return scriptFunction.apply(getOrdinalsValues());\n }\n \n @Override\n@@ -44,7 +55,7 @@ public final SortedBinaryDocValues getBytesValues() {\n }\n \n public static AtomicOrdinalsFieldData empty() {\n- return new AbstractAtomicOrdinalsFieldData() {\n+ return new AbstractAtomicOrdinalsFieldData(DEFAULT_SCRIPT_FUNCTION) {\n \n @Override\n public long ramBytesUsed() {", "filename": "core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractAtomicOrdinalsFieldData.java", "status": "modified" }, { "diff": "@@ -97,7 +97,8 @@ public IndexOrdinalsFieldData loadGlobal(DirectoryReader indexReader) {\n \n @Override\n public IndexOrdinalsFieldData localGlobalDirect(DirectoryReader indexReader) throws Exception {\n- return GlobalOrdinalsBuilder.build(indexReader, this, indexSettings, breakerService, logger);\n+ return GlobalOrdinalsBuilder.build(indexReader, this, indexSettings, breakerService, logger,\n+ AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION);\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java", "status": "modified" }, { "diff": "@@ -21,19 +21,22 
@@\n \n import org.apache.logging.log4j.Logger;\n import org.apache.lucene.index.IndexReader;\n+import org.apache.lucene.index.RandomAccessOrds;\n import org.elasticsearch.common.logging.Loggers;\n import org.elasticsearch.index.Index;\n import org.elasticsearch.index.IndexSettings;\n import org.elasticsearch.index.fielddata.IndexFieldData;\n import org.elasticsearch.index.fielddata.IndexFieldDataCache;\n import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType;\n+import org.elasticsearch.index.fielddata.ScriptDocValues;\n import org.elasticsearch.index.mapper.IdFieldMapper;\n import org.elasticsearch.index.mapper.MappedFieldType;\n import org.elasticsearch.index.mapper.MapperService;\n import org.elasticsearch.index.mapper.UidFieldMapper;\n import org.elasticsearch.indices.breaker.CircuitBreakerService;\n \n import java.util.Set;\n+import java.util.function.Function;\n \n import static java.util.Collections.unmodifiableSet;\n import static org.elasticsearch.common.util.set.Sets.newHashSet;\n@@ -72,12 +75,18 @@ public static class Builder implements IndexFieldData.Builder {\n private static final Set<String> BINARY_INDEX_FIELD_NAMES = unmodifiableSet(newHashSet(UidFieldMapper.NAME, IdFieldMapper.NAME));\n \n private NumericType numericType;\n+ private Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction = AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION;\n \n public Builder numericType(NumericType type) {\n this.numericType = type;\n return this;\n }\n \n+ public Builder scriptFunction(Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction) {\n+ this.scriptFunction = scriptFunction;\n+ return this;\n+ }\n+\n @Override\n public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache,\n CircuitBreakerService breakerService, MapperService mapperService) {\n@@ -89,7 +98,7 @@ public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fiel\n } else if (numericType != null) {\n return new SortedNumericDVIndexFieldData(indexSettings.getIndex(), fieldName, numericType);\n } else {\n- return new SortedSetDVOrdinalsIndexFieldData(indexSettings, cache, fieldName, breakerService);\n+ return new SortedSetDVOrdinalsIndexFieldData(indexSettings, cache, fieldName, breakerService, scriptFunction);\n }\n }\n ", "filename": "core/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java", "status": "modified" }, { "diff": "@@ -56,6 +56,7 @@ private static class IndexAtomicFieldData extends AbstractAtomicOrdinalsFieldDat\n private final String index;\n \n IndexAtomicFieldData(String index) {\n+ super(DEFAULT_SCRIPT_FUNCTION);\n this.index = index;\n }\n ", "filename": "core/src/main/java/org/elasticsearch/index/fielddata/plain/IndexIndexFieldData.java", "status": "modified" }, { "diff": "@@ -38,6 +38,7 @@ public class PagedBytesAtomicFieldData extends AbstractAtomicOrdinalsFieldData {\n protected final Ordinals ordinals;\n \n public PagedBytesAtomicFieldData(PagedBytes.Reader bytes, PackedLongValues termOrdToBytesOffset, Ordinals ordinals) {\n+ super(DEFAULT_SCRIPT_FUNCTION);\n this.bytes = bytes;\n this.termOrdToBytesOffset = termOrdToBytesOffset;\n this.ordinals = ordinals;", "filename": "core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesAtomicFieldData.java", "status": "modified" }, { "diff": "@@ -25,10 +25,12 @@\n import org.apache.lucene.util.Accountable;\n import org.elasticsearch.index.fielddata.AtomicFieldData;\n import 
org.elasticsearch.index.fielddata.FieldData;\n+import org.elasticsearch.index.fielddata.ScriptDocValues;\n \n import java.io.IOException;\n import java.util.Collection;\n import java.util.Collections;\n+import java.util.function.Function;\n \n /**\n * An {@link AtomicFieldData} implementation that uses Lucene {@link org.apache.lucene.index.SortedSetDocValues}.\n@@ -38,7 +40,9 @@ public final class SortedSetDVBytesAtomicFieldData extends AbstractAtomicOrdinal\n private final LeafReader reader;\n private final String field;\n \n- SortedSetDVBytesAtomicFieldData(LeafReader reader, String field) {\n+ SortedSetDVBytesAtomicFieldData(LeafReader reader, String field, Function<RandomAccessOrds,\n+ ScriptDocValues<?>> scriptFunction) {\n+ super(scriptFunction);\n this.reader = reader;\n this.field = field;\n }", "filename": "core/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVBytesAtomicFieldData.java", "status": "modified" }, { "diff": "@@ -21,31 +21,37 @@\n \n import org.apache.lucene.index.DirectoryReader;\n import org.apache.lucene.index.LeafReaderContext;\n+import org.apache.lucene.index.RandomAccessOrds;\n import org.elasticsearch.ElasticsearchException;\n import org.elasticsearch.index.IndexSettings;\n import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;\n import org.elasticsearch.index.fielddata.IndexFieldData;\n import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;\n import org.elasticsearch.index.fielddata.IndexFieldDataCache;\n import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;\n+import org.elasticsearch.index.fielddata.ScriptDocValues;\n import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;\n import org.elasticsearch.index.fielddata.ordinals.GlobalOrdinalsBuilder;\n import org.elasticsearch.indices.breaker.CircuitBreakerService;\n import org.elasticsearch.search.MultiValueMode;\n \n import java.io.IOException;\n+import java.util.function.Function;\n \n public class SortedSetDVOrdinalsIndexFieldData extends DocValuesIndexFieldData implements IndexOrdinalsFieldData {\n \n private final IndexSettings indexSettings;\n private final IndexFieldDataCache cache;\n private final CircuitBreakerService breakerService;\n+ private final Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction;\n \n- public SortedSetDVOrdinalsIndexFieldData(IndexSettings indexSettings, IndexFieldDataCache cache, String fieldName, CircuitBreakerService breakerService) {\n+ public SortedSetDVOrdinalsIndexFieldData(IndexSettings indexSettings, IndexFieldDataCache cache, String fieldName,\n+ CircuitBreakerService breakerService, Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction) {\n super(indexSettings.getIndex(), fieldName);\n this.indexSettings = indexSettings;\n this.cache = cache;\n this.breakerService = breakerService;\n+ this.scriptFunction = scriptFunction;\n }\n \n @Override\n@@ -55,7 +61,7 @@ public org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource c\n \n @Override\n public AtomicOrdinalsFieldData load(LeafReaderContext context) {\n- return new SortedSetDVBytesAtomicFieldData(context.reader(), fieldName);\n+ return new SortedSetDVBytesAtomicFieldData(context.reader(), fieldName, scriptFunction);\n }\n \n @Override\n@@ -100,6 +106,6 @@ public IndexOrdinalsFieldData loadGlobal(DirectoryReader indexReader) {\n \n @Override\n public IndexOrdinalsFieldData localGlobalDirect(DirectoryReader indexReader) throws Exception {\n- return 
GlobalOrdinalsBuilder.build(indexReader, this, indexSettings, breakerService, logger);\n+ return GlobalOrdinalsBuilder.build(indexReader, this, indexSettings, breakerService, logger, scriptFunction);\n }\n }", "filename": "core/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVOrdinalsIndexFieldData.java", "status": "modified" }, { "diff": "@@ -27,6 +27,7 @@\n import org.apache.lucene.index.IndexReader;\n import org.apache.lucene.index.IndexableField;\n import org.apache.lucene.index.PointValues;\n+import org.apache.lucene.index.RandomAccessOrds;\n import org.apache.lucene.search.MatchNoDocsQuery;\n import org.apache.lucene.search.Query;\n import org.apache.lucene.util.BytesRef;\n@@ -38,15 +39,22 @@\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.index.fielddata.IndexFieldData;\n+import org.elasticsearch.index.fielddata.ScriptDocValues;\n+import org.elasticsearch.index.fielddata.SortedBinaryDocValues;\n import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;\n import org.elasticsearch.index.query.QueryShardContext;\n import org.elasticsearch.search.DocValueFormat;\n import org.joda.time.DateTimeZone;\n \n import java.io.IOException;\n import java.net.InetAddress;\n+import java.util.AbstractList;\n+import java.util.Arrays;\n+import java.util.Collection;\n+import java.util.Collections;\n import java.util.Iterator;\n import java.util.List;\n+import java.util.ListIterator;\n import java.util.Map;\n \n /** A {@link FieldMapper} for ip addresses. */\n@@ -225,10 +233,50 @@ public FieldStats.Ip stats(IndexReader reader) throws IOException {\n InetAddressPoint.decode(min), InetAddressPoint.decode(max));\n }\n \n+ private static class IpScriptDocValues extends AbstractList<String> implements ScriptDocValues<String> {\n+\n+ private final RandomAccessOrds values;\n+\n+ IpScriptDocValues(RandomAccessOrds values) {\n+ this.values = values;\n+ }\n+\n+ @Override\n+ public void setNextDocId(int docId) {\n+ values.setDocument(docId);\n+ }\n+\n+ public String getValue() {\n+ if (isEmpty()) {\n+ return null;\n+ } else {\n+ return get(0);\n+ }\n+ }\n+\n+ @Override\n+ public List<String> getValues() {\n+ return Collections.unmodifiableList(this);\n+ }\n+\n+ @Override\n+ public String get(int index) {\n+ BytesRef encoded = values.lookupOrd(values.ordAt(0));\n+ InetAddress address = InetAddressPoint.decode(\n+ Arrays.copyOfRange(encoded.bytes, encoded.offset, encoded.offset + encoded.length));\n+ return InetAddresses.toAddrString(address);\n+ }\n+\n+ @Override\n+ public int size() {\n+ return values.cardinality();\n+ }\n+ }\n+\n @Override\n public IndexFieldData.Builder fielddataBuilder() {\n failIfNoDocValues();\n- return new DocValuesIndexFieldData.Builder();\n+ return new DocValuesIndexFieldData.Builder().scriptFunction(IpScriptDocValues::new);\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java", "status": "modified" }, { "diff": "@@ -35,6 +35,7 @@\n import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.index.IndexSettings;\n+import org.elasticsearch.index.fielddata.plain.AbstractAtomicOrdinalsFieldData;\n import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData;\n import org.elasticsearch.index.fielddata.plain.SortedSetDVOrdinalsIndexFieldData;\n import org.elasticsearch.index.mapper.TextFieldMapper;\n@@ -86,7 +87,8 @@ 
public void testLoadGlobal_neverCacheIfFieldIsMissing() throws Exception {\n }\n \n private SortedSetDVOrdinalsIndexFieldData createSortedDV(String fieldName, IndexFieldDataCache indexFieldDataCache) {\n- return new SortedSetDVOrdinalsIndexFieldData(createIndexSettings(), indexFieldDataCache, fieldName, new NoneCircuitBreakerService());\n+ return new SortedSetDVOrdinalsIndexFieldData(createIndexSettings(), indexFieldDataCache, fieldName, new NoneCircuitBreakerService(),\n+ AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION);\n }\n \n private PagedBytesIndexFieldData createPagedBytes(String fieldName, IndexFieldDataCache indexFieldDataCache) {", "filename": "core/src/test/java/org/elasticsearch/index/fielddata/FieldDataCacheTests.java", "status": "modified" }, { "diff": "@@ -22,11 +22,46 @@\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;\n \n import org.elasticsearch.action.search.SearchResponse;\n+import org.elasticsearch.index.fielddata.ScriptDocValues;\n+import org.elasticsearch.plugins.Plugin;\n+import org.elasticsearch.script.Script;\n+import org.elasticsearch.script.ScriptType;\n import org.elasticsearch.search.aggregations.AggregationBuilders;\n+import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin;\n import org.elasticsearch.search.aggregations.bucket.terms.Terms;\n \n+import java.util.Collection;\n+import java.util.Collections;\n+import java.util.Map;\n+import java.util.function.Function;\n+\n public class IpTermsIT extends AbstractTermsTestCase {\n \n+ @Override\n+ protected Collection<Class<? extends Plugin>> nodePlugins() {\n+ return Collections.singleton(CustomScriptPlugin.class);\n+ }\n+\n+ public static class CustomScriptPlugin extends AggregationTestScriptsPlugin {\n+\n+ @Override\n+ protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() {\n+ Map<String, Function<Map<String, Object>, Object>> scripts = super.pluginScripts();\n+\n+ scripts.put(\"doc['ip'].value\", vars -> {\n+ Map<?, ?> doc = (Map<?,?>) vars.get(\"doc\");\n+ return doc.get(\"ip\");\n+ });\n+\n+ scripts.put(\"doc['ip'].values\", vars -> {\n+ Map<?, ?> doc = (Map<?,?>) vars.get(\"doc\");\n+ return ((ScriptDocValues<?>) doc.get(\"ip\")).get(0);\n+ });\n+\n+ return scripts;\n+ }\n+ }\n+\n public void testBasics() throws Exception {\n assertAcked(prepareCreate(\"index\").addMapping(\"type\", \"ip\", \"type=ip\"));\n indexRandom(true,\n@@ -51,4 +86,55 @@ public void testBasics() throws Exception {\n assertEquals(\"2001:db8::2:1\", bucket2.getKeyAsString());\n }\n \n+ public void testScriptValue() throws Exception {\n+ assertAcked(prepareCreate(\"index\").addMapping(\"type\", \"ip\", \"type=ip\"));\n+ indexRandom(true,\n+ client().prepareIndex(\"index\", \"type\", \"1\").setSource(\"ip\", \"192.168.1.7\"),\n+ client().prepareIndex(\"index\", \"type\", \"2\").setSource(\"ip\", \"192.168.1.7\"),\n+ client().prepareIndex(\"index\", \"type\", \"3\").setSource(\"ip\", \"2001:db8::2:1\"));\n+\n+ Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,\n+ \"doc['ip'].value\", Collections.emptyMap());\n+ SearchResponse response = client().prepareSearch(\"index\").addAggregation(\n+ AggregationBuilders.terms(\"my_terms\").script(script).executionHint(randomExecutionHint())).get();\n+ assertSearchResponse(response);\n+ Terms terms = response.getAggregations().get(\"my_terms\");\n+ assertEquals(2, terms.getBuckets().size());\n+\n+ Terms.Bucket bucket1 = terms.getBuckets().get(0);\n+ assertEquals(2, 
bucket1.getDocCount());\n+ assertEquals(\"192.168.1.7\", bucket1.getKey());\n+ assertEquals(\"192.168.1.7\", bucket1.getKeyAsString());\n+\n+ Terms.Bucket bucket2 = terms.getBuckets().get(1);\n+ assertEquals(1, bucket2.getDocCount());\n+ assertEquals(\"2001:db8::2:1\", bucket2.getKey());\n+ assertEquals(\"2001:db8::2:1\", bucket2.getKeyAsString());\n+ }\n+\n+ public void testScriptValues() throws Exception {\n+ assertAcked(prepareCreate(\"index\").addMapping(\"type\", \"ip\", \"type=ip\"));\n+ indexRandom(true,\n+ client().prepareIndex(\"index\", \"type\", \"1\").setSource(\"ip\", \"192.168.1.7\"),\n+ client().prepareIndex(\"index\", \"type\", \"2\").setSource(\"ip\", \"192.168.1.7\"),\n+ client().prepareIndex(\"index\", \"type\", \"3\").setSource(\"ip\", \"2001:db8::2:1\"));\n+\n+ Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,\n+ \"doc['ip'].values\", Collections.emptyMap());\n+ SearchResponse response = client().prepareSearch(\"index\").addAggregation(\n+ AggregationBuilders.terms(\"my_terms\").script(script).executionHint(randomExecutionHint())).get();\n+ assertSearchResponse(response);\n+ Terms terms = response.getAggregations().get(\"my_terms\");\n+ assertEquals(2, terms.getBuckets().size());\n+\n+ Terms.Bucket bucket1 = terms.getBuckets().get(0);\n+ assertEquals(2, bucket1.getDocCount());\n+ assertEquals(\"192.168.1.7\", bucket1.getKey());\n+ assertEquals(\"192.168.1.7\", bucket1.getKeyAsString());\n+\n+ Terms.Bucket bucket2 = terms.getBuckets().get(1);\n+ assertEquals(1, bucket2.getDocCount());\n+ assertEquals(\"2001:db8::2:1\", bucket2.getKey());\n+ assertEquals(\"2001:db8::2:1\", bucket2.getKeyAsString());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/search/aggregations/bucket/IpTermsIT.java", "status": "modified" }, { "diff": "@@ -766,6 +766,9 @@ public void testFieldsPulledFromFieldData() throws Exception {\n .startObject(\"binary_field\")\n .field(\"type\", \"binary\")\n .endObject()\n+ .startObject(\"ip_field\")\n+ .field(\"type\", \"ip\")\n+ .endObject()\n .endObject()\n .endObject()\n .endObject()\n@@ -784,6 +787,7 @@ public void testFieldsPulledFromFieldData() throws Exception {\n .field(\"double_field\", 6.0d)\n .field(\"date_field\", Joda.forPattern(\"dateOptionalTime\").printer().print(new DateTime(2012, 3, 22, 0, 0, DateTimeZone.UTC)))\n .field(\"boolean_field\", true)\n+ .field(\"ip_field\", \"::1\")\n .endObject()).execute().actionGet();\n \n client().admin().indices().prepareRefresh().execute().actionGet();\n@@ -798,14 +802,16 @@ public void testFieldsPulledFromFieldData() throws Exception {\n .addDocValueField(\"float_field\")\n .addDocValueField(\"double_field\")\n .addDocValueField(\"date_field\")\n- .addDocValueField(\"boolean_field\");\n+ .addDocValueField(\"boolean_field\")\n+ .addDocValueField(\"ip_field\");\n SearchResponse searchResponse = builder.execute().actionGet();\n \n assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L));\n assertThat(searchResponse.getHits().hits().length, equalTo(1));\n Set<String> fields = new HashSet<>(searchResponse.getHits().getAt(0).fields().keySet());\n assertThat(fields, equalTo(newHashSet(\"byte_field\", \"short_field\", \"integer_field\", \"long_field\",\n- \"float_field\", \"double_field\", \"date_field\", \"boolean_field\", \"text_field\", \"keyword_field\")));\n+ \"float_field\", \"double_field\", \"date_field\", \"boolean_field\", \"text_field\", \"keyword_field\",\n+ \"ip_field\")));\n \n 
assertThat(searchResponse.getHits().getAt(0).fields().get(\"byte_field\").value().toString(), equalTo(\"1\"));\n assertThat(searchResponse.getHits().getAt(0).fields().get(\"short_field\").value().toString(), equalTo(\"2\"));\n@@ -817,6 +823,7 @@ public void testFieldsPulledFromFieldData() throws Exception {\n assertThat(searchResponse.getHits().getAt(0).fields().get(\"boolean_field\").value(), equalTo((Object) true));\n assertThat(searchResponse.getHits().getAt(0).fields().get(\"text_field\").value(), equalTo(\"foo\"));\n assertThat(searchResponse.getHits().getAt(0).fields().get(\"keyword_field\").value(), equalTo(\"foo\"));\n+ assertThat(searchResponse.getHits().getAt(0).fields().get(\"ip_field\").value(), equalTo(\"::1\"));\n }\n \n public void testScriptFields() throws Exception {", "filename": "core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java", "status": "modified" } ] }
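The user-visible part of this fix is the decode step: the ip field stores every address internally in a fixed 16-byte IPv6 form, and handing those raw bytes to scripts and `docvalue_fields` is what produced the "unicode garbage" in the original report. The sketch below shows that decode using plain JDK classes only; the real mapper relies on Lucene's `InetAddressPoint.decode` together with `InetAddresses.toAddrString`, so treat this as an illustration of the idea rather than the shipped code.

```java
// Minimal JDK-only sketch of decoding the stored 16-byte form back to a readable address string.
// Assumption for illustration: IPv4 addresses are stored in their IPv4-mapped IPv6 form (::ffff:a.b.c.d),
// which the JDK collapses back to a plain IPv4 address when given the full 16-byte array.
import java.net.InetAddress;
import java.net.UnknownHostException;

public class DecodeStoredIp {

    static String decode(byte[] sixteenByteForm) throws UnknownHostException {
        // getByAddress accepts a 16-byte raw address and returns a printable InetAddress
        return InetAddress.getByAddress(sixteenByteForm).getHostAddress();
    }

    public static void main(String[] args) throws UnknownHostException {
        byte[] stored = new byte[16];
        // IPv4-mapped form of 70.92.108.189 (the address from the original bug report)
        stored[10] = (byte) 0xff;
        stored[11] = (byte) 0xff;
        stored[12] = (byte) 70;
        stored[13] = (byte) 92;
        stored[14] = (byte) 108;
        stored[15] = (byte) 0xbd; // 189
        // should print "70.92.108.189" instead of the raw escaped bytes seen in the issue
        System.out.println(decode(stored));
    }
}
```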
{ "body": "**Elasticsearch version**: 5.0.2\r\n\r\n**Plugins installed**: []\r\n\r\n**JVM version**: 1.8.0_66\r\n\r\n**OS version**: Mac OS X 10.12.1 (Sierra)\r\n\r\n**Description of the problem including expected versus actual behavior**:\r\n\r\nDuring a \"filters\" aggregation, setting the ```other_bucket_key``` does NOT implicitly set the ```other_bucket``` option to true, as mentioned in : https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filters-aggregation.html#_literal_other_literal_bucket\r\n\r\n**Steps to reproduce**:\r\n 1. Create a \"filters\" aggregation\r\n 2. Set ```other_bucket_key``` option to the desired bucket name, but do NOT include the ```other_bucket: true``` option.\r\n 3. The other_bucket is missing from the response.\r\n\r\nReproduce:\r\n```\r\ncurl -XPUT 'localhost:9200/test/'\r\n\r\ncurl -XPUT 'localhost:9200/test/document/1?pretty' -d'\r\n {\r\n \"a\" : 1,\r\n \"b\" : 2\r\n }'\r\n\r\ncurl -XPUT 'localhost:9200/test/document/2?pretty' -d'\r\n {\r\n \"a\" : 1,\r\n \"b\" : 3\r\n }'\r\n\r\ncurl -XPUT 'localhost:9200/test/document/3?pretty' -d'\r\n {\r\n \"a\" : 1,\r\n \"b\" : 4\r\n }'\r\n\r\ncurl -XGET 'localhost:9200/test/_search' -d'\r\n {\r\n \"aggs\" : {\r\n \"messages\" : {\r\n \"filters\" : {\r\n \"other_bucket_key\": \"bucket2\",\r\n \"filters\" : {\r\n \"bucket1\" : { \"term\" : { \"b\" : \"3\" }}\r\n }\r\n }\r\n }\r\n },\r\n \"size\": 0\r\n }'\r\n\r\n=> {\r\n \"took\":1,\r\n \"timed_out\":false,\r\n \"_shards\":{\"total\":5,\"successful\":5,\"failed\":0},\r\n \"hits\":{\"total\":3,\"max_score\":0.0,\"hits\":[]},\r\n \"aggregations\":{\r\n \"messages\":{\r\n \"buckets\":{\r\n \"bucket1\":{\"doc_count\":1} <== bucket2 is missing\r\n }\r\n }\r\n }\r\n}\r\n\r\ncurl -XGET 'localhost:9200/test/_search' -d'\r\n {\r\n \"aggs\" : {\r\n \"messages\" : {\r\n \"filters\" : {\r\n \"other_bucket\": true, \r\n \"other_bucket_key\": \"bucket2\",\r\n \"filters\" : {\r\n \"bucket1\" : { \"term\" : { \"b\" : \"3\" }}\r\n }\r\n }\r\n }\r\n },\r\n \"size\": 0\r\n }'\r\n\r\n=> {\r\n \"took\":1,\r\n \"timed_out\":false,\r\n \"_shards\":{\"total\":5,\"successful\":5,\"failed\":0},\r\n \"hits\":{\"total\":3,\"max_score\":0.0,\"hits\":[]},\r\n \"aggregations\":{\r\n \"messages\":{\r\n \"buckets\":{\r\n \"bucket1\":{\"doc_count\":1},\r\n \"bucket2\":{\"doc_count\":2} <== NOW bucket2 is present\r\n }\r\n }\r\n }\r\n}\r\n```\r\n\r\n**Provide logs (if relevant)**:\r\n\r\n", "comments": [], "number": 21951, "title": "other_bucket_key does not implicitly set other_bucket => true" }
{ "body": "Closes #21951", "number": 21994, "review_comments": [], "title": "Fix `other_bucket` on the `filters` agg to be enabled if a key is set." }
{ "commits": [ { "message": "Fix `other_bucket` on the `filters` agg to be enabled if a key is set.\n\nCloses #21951" } ], "files": [ { "diff": "@@ -209,7 +209,7 @@ public static FiltersAggregationBuilder parse(String aggregationName, QueryParse\n XContentParser.Token token = null;\n String currentFieldName = null;\n String otherBucketKey = null;\n- Boolean otherBucket = false;\n+ Boolean otherBucket = null;\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {\n currentFieldName = parser.currentName();\n@@ -260,8 +260,9 @@ public static FiltersAggregationBuilder parse(String aggregationName, QueryParse\n }\n }\n \n- if (otherBucket && otherBucketKey == null) {\n- otherBucketKey = \"_other_\";\n+ if (otherBucket == null && otherBucketKey != null) {\n+ // automatically enable the other bucket if a key is set, as per the doc\n+ otherBucket = true;\n }\n \n FiltersAggregationBuilder factory;", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -19,11 +19,23 @@\n \n package org.elasticsearch.search.aggregations.metrics;\n \n+import org.elasticsearch.common.ParseFieldMatcher;\n+import org.elasticsearch.common.xcontent.XContentBuilder;\n+import org.elasticsearch.common.xcontent.XContentFactory;\n+import org.elasticsearch.common.xcontent.XContentHelper;\n+import org.elasticsearch.common.xcontent.XContentParser;\n+import org.elasticsearch.common.xcontent.XContentType;\n import org.elasticsearch.index.query.MatchNoneQueryBuilder;\n import org.elasticsearch.index.query.QueryBuilder;\n import org.elasticsearch.index.query.QueryBuilders;\n+import org.elasticsearch.index.query.QueryParseContext;\n+import org.elasticsearch.indices.query.IndicesQueriesRegistry;\n import org.elasticsearch.search.aggregations.BaseAggregationTestCase;\n import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregator.KeyedFilter;\n+import org.elasticsearch.search.slice.SliceBuilder;\n+\n+import java.io.IOException;\n+\n import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregationBuilder;\n \n public class FiltersTests extends BaseAggregationTestCase<FiltersAggregationBuilder> {\n@@ -73,4 +85,42 @@ public void testFiltersSortedByKey() {\n assertEquals(\"aaa\", original[1].key());\n }\n \n+ public void testOtherBucket() throws IOException {\n+ XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));\n+ builder.startObject();\n+ builder.startArray(\"filters\").endArray();\n+ builder.endObject();\n+ XContentParser parser = XContentHelper.createParser(shuffleXContent(builder).bytes());\n+ parser.nextToken();\n+ QueryParseContext context = new QueryParseContext(new IndicesQueriesRegistry(), parser,\n+ ParseFieldMatcher.STRICT);\n+ FiltersAggregationBuilder filters = FiltersAggregationBuilder.parse(\"agg_name\", context);\n+ // The other bucket is disabled by default\n+ assertFalse(filters.otherBucket());\n+\n+ builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));\n+ builder.startObject();\n+ builder.startArray(\"filters\").endArray();\n+ builder.field(\"other_bucket_key\", \"some_key\");\n+ builder.endObject();\n+ parser = XContentHelper.createParser(shuffleXContent(builder).bytes());\n+ parser.nextToken();\n+ context = new QueryParseContext(new IndicesQueriesRegistry(), parser, ParseFieldMatcher.STRICT);\n+ filters = FiltersAggregationBuilder.parse(\"agg_name\", 
context);\n+ // but setting a key enables it automatically\n+ assertTrue(filters.otherBucket());\n+\n+ builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));\n+ builder.startObject();\n+ builder.startArray(\"filters\").endArray();\n+ builder.field(\"other_bucket\", false);\n+ builder.field(\"other_bucket_key\", \"some_key\");\n+ builder.endObject();\n+ parser = XContentHelper.createParser(shuffleXContent(builder).bytes());\n+ parser.nextToken();\n+ context = new QueryParseContext(new IndicesQueriesRegistry(), parser, ParseFieldMatcher.STRICT);\n+ filters = FiltersAggregationBuilder.parse(\"agg_name\", context);\n+ // unless the other bucket is explicitly disabled\n+ assertFalse(filters.otherBucket());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/search/aggregations/metrics/FiltersTests.java", "status": "modified" }, { "diff": "@@ -28,7 +28,6 @@ buildRestTests.expectedUnconvertedCandidates = [\n 'reference/aggregations/bucket/daterange-aggregation.asciidoc',\n 'reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc',\n 'reference/aggregations/bucket/filter-aggregation.asciidoc',\n- 'reference/aggregations/bucket/filters-aggregation.asciidoc',\n 'reference/aggregations/bucket/geodistance-aggregation.asciidoc',\n 'reference/aggregations/bucket/geohashgrid-aggregation.asciidoc',\n 'reference/aggregations/bucket/global-aggregation.asciidoc',", "filename": "docs/build.gradle", "status": "modified" }, { "diff": "@@ -9,62 +9,61 @@ Example:\n \n [source,js]\n --------------------------------------------------\n+PUT /logs/message/_bulk?refresh\n+{ \"index\" : { \"_id\" : 1 } }\n+{ \"body\" : \"warning: page could not be rendered\" }\n+{ \"index\" : { \"_id\" : 2 } }\n+{ \"body\" : \"authentication error\" }\n+{ \"index\" : { \"_id\" : 3 } }\n+{ \"body\" : \"warning: connection timed out\" }\n+\n+GET logs/_search\n {\n+ \"size\": 0,\n \"aggs\" : {\n \"messages\" : {\n \"filters\" : {\n \"filters\" : {\n- \"errors\" : { \"term\" : { \"body\" : \"error\" }},\n- \"warnings\" : { \"term\" : { \"body\" : \"warning\" }}\n- }\n- },\n- \"aggs\" : {\n- \"monthly\" : {\n- \"histogram\" : {\n- \"field\" : \"timestamp\",\n- \"interval\" : \"1M\"\n- }\n+ \"errors\" : { \"match\" : { \"body\" : \"error\" }},\n+ \"warnings\" : { \"match\" : { \"body\" : \"warning\" }}\n }\n }\n }\n }\n }\n --------------------------------------------------\n+// CONSOLE\n \n In the above example, we analyze log messages. The aggregation will build two\n collection (buckets) of log messages - one for all those containing an error,\n-and another for all those containing a warning. And for each of these buckets\n-it will break them down by month.\n+and another for all those containing a warning.\n \n Response:\n \n [source,js]\n --------------------------------------------------\n-...\n- \"aggs\" : {\n- \"messages\" : {\n- \"buckets\" : {\n- \"errors\" : {\n- \"doc_count\" : 34,\n- \"monthly\" : {\n- \"buckets\" : [\n- ... // the histogram monthly breakdown\n- ]\n- }\n+{\n+ \"took\": 9,\n+ \"timed_out\": false,\n+ \"_shards\": ...,\n+ \"hits\": ...,\n+ \"aggregations\": {\n+ \"messages\": {\n+ \"buckets\": {\n+ \"errors\": {\n+ \"doc_count\": 1\n },\n- \"warnings\" : {\n- \"doc_count\" : 439,\n- \"monthly\" : {\n- \"buckets\" : [\n- ... 
// the histogram monthly breakdown\n- ]\n- }\n+ \"warnings\": {\n+ \"doc_count\": 2\n }\n }\n }\n }\n-...\n+}\n --------------------------------------------------\n+// TESTRESPONSE[s/\"took\": 9/\"took\": $body.took/]\n+// TESTRESPONSE[s/\"_shards\": \\.\\.\\./\"_shards\": $body._shards/]\n+// TESTRESPONSE[s/\"hits\": \\.\\.\\./\"hits\": $body.hits/]\n \n ==== Anonymous filters\n \n@@ -73,58 +72,51 @@ following request:\n \n [source,js]\n --------------------------------------------------\n+GET logs/_search\n {\n+ \"size\": 0,\n \"aggs\" : {\n \"messages\" : {\n \"filters\" : {\n \"filters\" : [\n- { \"term\" : { \"body\" : \"error\" }},\n- { \"term\" : { \"body\" : \"warning\" }}\n+ { \"match\" : { \"body\" : \"error\" }},\n+ { \"match\" : { \"body\" : \"warning\" }}\n ]\n- },\n- \"aggs\" : {\n- \"monthly\" : {\n- \"histogram\" : {\n- \"field\" : \"timestamp\",\n- \"interval\" : \"1M\"\n- }\n- }\n }\n }\n }\n }\n --------------------------------------------------\n+// CONSOLE\n+// TEST[continued]\n \n The filtered buckets are returned in the same order as provided in the\n request. The response for this example would be:\n \n [source,js]\n --------------------------------------------------\n-...\n- \"aggs\" : {\n- \"messages\" : {\n- \"buckets\" : [\n+{\n+ \"took\": 4,\n+ \"timed_out\": false,\n+ \"_shards\": ...,\n+ \"hits\": ...,\n+ \"aggregations\": {\n+ \"messages\": {\n+ \"buckets\": [\n {\n- \"doc_count\" : 34,\n- \"monthly\" : {\n- \"buckets\" : [\n- ... // the histogram monthly breakdown\n- ]\n- }\n+ \"doc_count\": 1\n },\n {\n- \"doc_count\" : 439,\n- \"monthly\" : {\n- \"buckets\" : [\n- ... // the histogram monthly breakdown\n- ]\n- }\n+ \"doc_count\": 2\n }\n ]\n }\n }\n-...\n+}\n --------------------------------------------------\n+// TESTRESPONSE[s/\"took\": 4/\"took\": $body.took/]\n+// TESTRESPONSE[s/\"_shards\": \\.\\.\\./\"_shards\": $body._shards/]\n+// TESTRESPONSE[s/\"hits\": \\.\\.\\./\"hits\": $body.hits/]\n \n ==== `Other` Bucket\n \n@@ -142,64 +134,56 @@ The following snippet shows a response where the `other` bucket is requested to\n \n [source,js]\n --------------------------------------------------\n+PUT logs/message/4?refresh\n {\n+ \"body\": \"info: user Bob logged out\"\n+}\n+\n+GET logs/_search\n+{\n+ \"size\": 0,\n \"aggs\" : {\n \"messages\" : {\n \"filters\" : {\n \"other_bucket_key\": \"other_messages\",\n \"filters\" : {\n- \"errors\" : { \"term\" : { \"body\" : \"error\" }},\n- \"warnings\" : { \"term\" : { \"body\" : \"warning\" }}\n- }\n- },\n- \"aggs\" : {\n- \"monthly\" : {\n- \"histogram\" : {\n- \"field\" : \"timestamp\",\n- \"interval\" : \"1M\"\n- }\n+ \"errors\" : { \"match\" : { \"body\" : \"error\" }},\n+ \"warnings\" : { \"match\" : { \"body\" : \"warning\" }}\n }\n }\n }\n }\n }\n --------------------------------------------------\n+// CONSOLE\n+// TEST[continued]\n \n The response would be something like the following:\n \n [source,js]\n --------------------------------------------------\n-...\n- \"aggs\" : {\n- \"messages\" : {\n- \"buckets\" : {\n- \"errors\" : {\n- \"doc_count\" : 34,\n- \"monthly\" : {\n- \"buckets\" : [\n- ... // the histogram monthly breakdown\n- ]\n- }\n- },\n- \"warnings\" : {\n- \"doc_count\" : 439,\n- \"monthly\" : {\n- \"buckets\" : [\n- ... // the histogram monthly breakdown\n- ]\n- }\n- },\n- \"other_messages\" : {\n- \"doc_count\" : 237,\n- \"monthly\" : {\n- \"buckets\" : [\n- ... 
// the histogram monthly breakdown\n- ]\n- }\n- }\n+{\n+ \"took\": 3,\n+ \"timed_out\": false,\n+ \"_shards\": ...,\n+ \"hits\": ...,\n+ \"aggregations\": {\n+ \"messages\": {\n+ \"buckets\": {\n+ \"errors\": {\n+ \"doc_count\": 1\n+ },\n+ \"warnings\": {\n+ \"doc_count\": 2\n+ },\n+ \"other_messages\": {\n+ \"doc_count\": 1\n }\n }\n }\n }\n-...\n+}\n --------------------------------------------------\n+// TESTRESPONSE[s/\"took\": 3/\"took\": $body.took/]\n+// TESTRESPONSE[s/\"_shards\": \\.\\.\\./\"_shards\": $body._shards/]\n+// TESTRESPONSE[s/\"hits\": \\.\\.\\./\"hits\": $body.hits/]", "filename": "docs/reference/aggregations/bucket/filters-aggregation.asciidoc", "status": "modified" } ] }
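The fix above works by parsing `other_bucket` into a nullable `Boolean`, so that "not specified" can be told apart from an explicit `false`, and only then applying the documented default. The following is a minimal, self-contained sketch of that defaulting rule only — a plain `Map` stands in for the real `XContentParser`, and `resolveOtherBucket` is a hypothetical helper, not part of the Elasticsearch parser shown in the diff.

```java
import java.util.Map;

// Illustrative only: mimics the defaulting rule from the patch above using a
// plain Map instead of an XContentParser. Names are hypothetical.
public class OtherBucketDefaulting {

    /** Returns whether the "other" bucket should be enabled for the given request body. */
    static boolean resolveOtherBucket(Map<String, Object> aggBody) {
        Boolean otherBucket = (Boolean) aggBody.get("other_bucket");     // null if absent
        String otherBucketKey = (String) aggBody.get("other_bucket_key");

        if (otherBucket == null) {
            // Not specified: enable it automatically when a key was given, as the docs describe.
            return otherBucketKey != null;
        }
        // An explicit value always wins, even if a key is present.
        return otherBucket;
    }

    public static void main(String[] args) {
        System.out.println(resolveOtherBucket(Map.of()));                                  // false
        System.out.println(resolveOtherBucket(Map.of("other_bucket_key", "other")));       // true
        System.out.println(resolveOtherBucket(Map.of("other_bucket", false,
                                                     "other_bucket_key", "other")));       // false
    }
}
```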
{ "body": "Assume a 3-node cluster (1 master, 2 data nodes) with an index with 1 primary and 1 replica. Decommission the node with the replica shard by shutting it down and wiping its state. The master moves the replica shard to unassigned, but keeps its allocation id in the in-sync set as long as no replication operations happen on the primary. Now, decrease the number of replicas to zero. This does not remove the allocation id of the replica from the in-sync set. Shut down the node with the primary. This will move the primary shard to unassigned AND update the in-sync set:\r\nA logic in `IndexMetaDataUpdater` comes into play that limits the number of in-sync replica entries to the maximum number of shards that can be active (namely 1, as we have decreased the number of replicas to 0). The set of in-sync allocation ids contains the allocation id of the primary and the replica. The algorithm in `IndexMetaDataUpdater` has no way to chose which id to eliminate from the in-sync replica set and eliminates one at random. Assume the primary is eliminated. After restarting the node with the primary, it cannot be automatically allocated as its shard copy does not match the entry in the in-sync replica set.", "comments": [ { "body": "I am not sure if this is the write place to put this\r\nBut, I ran into something like this twice now\r\n\r\nI did following operations (I don't remember the order of these)\r\n\r\n\"Added new node\"\r\n\"Disabled allocation\"\r\n\"Added more nodes\"\r\n\"Excluded few nodes from shard allocation \"\r\n\"Changed # of replicas to 0\"\r\n\r\nI also had changed node awareness in between (disabled by setting awareness attribute to `null`)\r\n\r\nI ended up with unassigned shards even after reverting all cluster updates I did before.\r\nI looked at the disk and I could see that shard data is there. \r\nRouting table in cluster state says `NODE_LEFT` as the reason\r\n\r\nI waited for almost an hour, some shards didn't get allocated. I manually pinged `_cluster/reroute` which didn't make any difference.\r\n\r\nI had to restart one of the node where those unassigned shard data is there on disk. 
Even then some indices are still in unassigned state.\r\n\r\nRouting information for the shard that didn't get allocated even after restarting\r\n```\r\n \"3\": [\r\n {\r\n \"state\": \"UNASSIGNED\",\r\n \"primary\": true,\r\n \"node\": null,\r\n \"relocating_node\": null,\r\n \"shard\": 3,\r\n \"index\": \"test\",\r\n \"recovery_source\": {\r\n \"type\": \"EXISTING_STORE\"\r\n },\r\n \"unassigned_info\": {\r\n \"reason\": \"NODE_LEFT\",\r\n \"at\": \"2016-11-30T09:05:10.817Z\",\r\n \"delayed\": false,\r\n \"details\": \"node_left[fWo-O27hRW-egfvtpQO-BQ]\",\r\n \"allocation_status\": \"no_valid_shard_copy\"\r\n }\r\n },\r\n```\r\ncurl -XGET 'http://localhost:9200/_shard_stores?pretty'\r\n```\r\n{\r\n \"indices\" : {\r\n \"test\" : {\r\n \"shards\" : {\r\n \"3\" : {\r\n \"stores\" : [\r\n {\r\n \"fWo-O27hRW-egfvtpQO-BQ\" : {\r\n \"name\" : \"data-primary-255684\",\r\n \"ephemeral_id\" : \"aJV_xqKqR6i_QLC1lWortg\",\r\n \"transport_address\" : \"10.0.34.17:9300\",\r\n \"attributes\" : {\r\n \"rack_id\" : \"primary\"\r\n }\r\n },\r\n \"allocation_id\" : \"45790dx0S9Sb2T7nHQnbGw\",\r\n \"allocation\" : \"unused\"\r\n }\r\n ]\r\n }\r\n }\r\n }\r\n }\r\n}\r\n```", "created_at": "2016-11-30T11:04:13Z" }, { "body": "Can you also provide the output of \r\n```\r\ncurl -XGET 'http://localhost:9200/_cluster/allocation/explain' -d'{\r\n \"index\": \"test\",\r\n \"shard\": 3,\r\n \"primary\": true\r\n}'\r\n```\r\n\r\nAs workaround, if you're **absolutely sure** that only this one node has data for this shard (i.e. all nodes were available while running the `_shard_stores` command), you can use the `allocate_stale_primary` command (which is part of the `reroute` command, see https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-reroute.html ) to explicitly tell the system to chose the shard data on node `data-primary-255684` as primary copy. The risk with running the `allocate_stale_primary` command is that if the data on that node is not the most recent one (there exists another newer copy of the data somewhere else), you will experience data loss as the newer shard copy will be discarded.\r\n", "created_at": "2016-11-30T11:30:49Z" }, { "body": "I am doing some modifications to the cluster, so this output may have some irrelevant info\r\nhttps://gist.github.com/vanga/7ff86fc51127e2be42670cd4c78cd9ec\r\n\r\nMain index was fixed by restarting the node, not sure why this shard didn't get allocated even though I see corresponding files for this shard on disk.\r\n\r\n> you can use the allocate_stale_primary command (which is part of the reroute command, \r\n\r\nAfter doing this manually now its allocated.\r\nThanks.\r\n", "created_at": "2016-11-30T11:48:20Z" } ], "number": 21719, "title": "Adjusting the number of replicas can remove valid shard copies from the in-sync set" }
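The `allocate_stale_primary` workaround suggested in the comments is a plain `_cluster/reroute` call. Below is a hedged sketch of issuing it with the JDK HTTP client, using the index, shard, and node name from the example output above; the exact body should be checked against the reroute documentation for your version, and, as the comment warns, `accept_data_loss` makes this destructive if a fresher copy exists elsewhere.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Sketch of the allocate_stale_primary workaround described in the comments above.
// Only run this if you are certain the chosen node holds the freshest copy:
// accept_data_loss=true discards any newer copy that might exist elsewhere.
public class AllocateStalePrimary {
    public static void main(String[] args) throws Exception {
        String body = """
            {
              "commands": [
                {
                  "allocate_stale_primary": {
                    "index": "test",
                    "shard": 3,
                    "node": "data-primary-255684",
                    "accept_data_loss": true
                  }
                }
              ]
            }
            """;

        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:9200/_cluster/reroute"))
            .header("Content-Type", "application/json")
            .POST(HttpRequest.BodyPublishers.ofString(body))
            .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode());
        System.out.println(response.body());
    }
}
```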
{ "body": "This PR makes two changes to how the in-sync allocations set is updated:\r\n- the set is only trimmed when it grows. This prevents trimming too eagerly when the number of replicas was decreased while shards were unassigned.\r\n- the allocation id of an active primary that failed is only removed from the in-sync set if another replica gets promoted to primary. This prevents the situation where the only available shard copy in the cluster gets removed the in-sync set.\r\n\r\nRelates to #21719", "number": 21976, "review_comments": [ { "body": "Could this hypothetically still happen if the number of replicas for an index was decreased while shards were unassigned, and simultaneously new indices were created so that the newly added check would still trim the allocation ids?", "created_at": "2016-12-05T20:56:00Z" }, { "body": "the in-sync set is managed on a per-shard id (index name + shard number) basis so that scenario does not apply.", "created_at": "2016-12-05T21:26:26Z" }, { "body": "Okay, glad to hear that; thanks for the clarification!", "created_at": "2016-12-05T22:52:04Z" }, { "body": "I'm wondering if we should just never remove if there are no active copies? it's a simpler edge case to understand and it covers this scenario. I think it's even equivalent (i.e., the last active shard must be a primary).", "created_at": "2016-12-06T10:44:21Z" }, { "body": "If I understand you correctly, you mean \"don't remove any allocation ids from the in-sync set if there are no active shards in the new routing table\"? If so, that's not safe to do. For an explanation, see this test here that covers that scenario: https://github.com/elastic/elasticsearch/blob/e5f1eb5dc935695be088aaea6828fff66d250b4e/core/src/test/java/org/elasticsearch/cluster/routing/allocation/InSyncAllocationIdTests.java#L177-L183", "created_at": "2016-12-06T12:43:34Z" }, { "body": "I'm happy we have all these tests. It is also another data point to move in the direction we discussed - i.e., failures should mark things as stale.", "created_at": "2016-12-06T15:07:19Z" } ], "title": "Trim in-sync allocations set only when it grows" }
{ "commits": [ { "message": "Trim in-sync set only when it grows" } ], "files": [ { "diff": "@@ -22,6 +22,7 @@\n import org.elasticsearch.cluster.ClusterState;\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.cluster.metadata.MetaData;\n+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;\n import org.elasticsearch.cluster.routing.RecoverySource;\n import org.elasticsearch.cluster.routing.RoutingChangesObserver;\n import org.elasticsearch.cluster.routing.RoutingTable;\n@@ -174,10 +175,13 @@ private IndexMetaData.Builder updateInSyncAllocations(RoutingTable newRoutingTab\n // Prevent set of inSyncAllocationIds to grow unboundedly. This can happen for example if we don't write to a primary\n // but repeatedly shut down nodes that have active replicas.\n // We use number_of_replicas + 1 (= possible active shard copies) to bound the inSyncAllocationIds set\n+ // Only trim the set of allocation ids when it grows, otherwise we might trim too eagerly when the number\n+ // of replicas was decreased while shards were unassigned.\n int maxActiveShards = oldIndexMetaData.getNumberOfReplicas() + 1; // +1 for the primary\n- if (inSyncAllocationIds.size() > maxActiveShards) {\n+ IndexShardRoutingTable newShardRoutingTable = newRoutingTable.shardRoutingTable(shardId);\n+ if (inSyncAllocationIds.size() > oldInSyncAllocationIds.size() && inSyncAllocationIds.size() > maxActiveShards) {\n // trim entries that have no corresponding shard routing in the cluster state (i.e. trim unavailable copies)\n- List<ShardRouting> assignedShards = newRoutingTable.shardRoutingTable(shardId).assignedShards();\n+ List<ShardRouting> assignedShards = newShardRoutingTable.assignedShards();\n assert assignedShards.size() <= maxActiveShards :\n \"cannot have more assigned shards \" + assignedShards + \" than maximum possible active shards \" + maxActiveShards;\n Set<String> assignedAllocations = assignedShards.stream().map(s -> s.allocationId().getId()).collect(Collectors.toSet());\n@@ -187,16 +191,12 @@ private IndexMetaData.Builder updateInSyncAllocations(RoutingTable newRoutingTab\n .collect(Collectors.toSet());\n }\n \n- // only update in-sync allocation ids if there is at least one entry remaining. Assume for example that there only\n- // ever was a primary active and now it failed. If we were to remove the allocation id from the in-sync set, this would\n- // create an empty primary on the next allocation (see ShardRouting#allocatedPostIndexCreate)\n- if (inSyncAllocationIds.isEmpty() && oldInSyncAllocationIds.isEmpty() == false) {\n- assert updates.firstFailedPrimary != null :\n- \"in-sync set became empty but active primary wasn't failed: \" + oldInSyncAllocationIds;\n- if (updates.firstFailedPrimary != null) {\n- // add back allocation id of failed primary\n- inSyncAllocationIds.add(updates.firstFailedPrimary.allocationId().getId());\n- }\n+ // only remove allocation id of failed active primary if there is at least one active shard remaining. Assume for example that\n+ // the primary fails but there is no new primary to fail over to. 
If we were to remove the allocation id of the primary from the\n+ // in-sync set, this could create an empty primary on the next allocation.\n+ if (newShardRoutingTable.activeShards().isEmpty() && updates.firstFailedPrimary != null) {\n+ // add back allocation id of failed primary\n+ inSyncAllocationIds.add(updates.firstFailedPrimary.allocationId().getId());\n }\n \n assert inSyncAllocationIds.isEmpty() == false || oldInSyncAllocationIds.isEmpty() :", "filename": "core/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetaDataUpdater.java", "status": "modified" }, { "diff": "@@ -259,6 +259,88 @@ public void testInSyncIdsNotGrowingWithoutBounds() throws Exception {\n assertThat(newInSyncSet, hasItem(primaryShard.allocationId().getId()));\n }\n \n+ /**\n+ * Only trim set of allocation ids when the set grows\n+ */\n+ public void testInSyncIdsNotTrimmedWhenNotGrowing() throws Exception {\n+ ClusterState clusterState = createOnePrimaryOneReplicaClusterState(allocation);\n+\n+ Set<String> inSyncSet = clusterState.metaData().index(\"test\").inSyncAllocationIds(0);\n+ assertThat(inSyncSet.size(), equalTo(2));\n+\n+ IndexShardRoutingTable shardRoutingTable = clusterState.routingTable().index(\"test\").shard(0);\n+ ShardRouting primaryShard = shardRoutingTable.primaryShard();\n+ ShardRouting replicaShard = shardRoutingTable.replicaShards().get(0);\n+\n+ logger.info(\"remove replica node\");\n+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())\n+ .remove(replicaShard.currentNodeId()))\n+ .build();\n+ clusterState = allocation.deassociateDeadNodes(clusterState, true, \"reroute\");\n+\n+ // in-sync allocation ids should not be updated\n+ assertEquals(inSyncSet, clusterState.metaData().index(\"test\").inSyncAllocationIds(0));\n+\n+ logger.info(\"remove primary node\");\n+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())\n+ .remove(primaryShard.currentNodeId()))\n+ .build();\n+ clusterState = allocation.deassociateDeadNodes(clusterState, true, \"reroute\");\n+\n+ // in-sync allocation ids should not be updated\n+ assertEquals(inSyncSet, clusterState.metaData().index(\"test\").inSyncAllocationIds(0));\n+\n+ logger.info(\"decrease number of replicas to 0\");\n+ clusterState = ClusterState.builder(clusterState)\n+ .routingTable(RoutingTable.builder(clusterState.routingTable()).updateNumberOfReplicas(0, \"test\").build())\n+ .metaData(MetaData.builder(clusterState.metaData()).updateNumberOfReplicas(0, \"test\")).build();\n+\n+ logger.info(\"add back node 1\");\n+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(\n+ newNode(\"node1\"))).build();\n+ clusterState = allocation.reroute(clusterState, \"reroute\");\n+\n+ assertThat(clusterState.routingTable().index(\"test\").shard(0).assignedShards().size(), equalTo(1));\n+ // in-sync allocation ids should not be updated\n+ assertEquals(inSyncSet, clusterState.metaData().index(\"test\").inSyncAllocationIds(0));\n+\n+ logger.info(\"start primary shard\");\n+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));\n+ // in-sync allocation ids should not be updated\n+ assertEquals(inSyncSet, clusterState.metaData().index(\"test\").inSyncAllocationIds(0));\n+ }\n+\n+ /**\n+ * Don't remove allocation id of failed active primary if there is no replica to promote as primary.\n+ */\n+ public void testPrimaryAllocationIdNotRemovedFromInSyncSetWhenNoFailOver() 
throws Exception {\n+ ClusterState clusterState = createOnePrimaryOneReplicaClusterState(allocation);\n+\n+ Set<String> inSyncSet = clusterState.metaData().index(\"test\").inSyncAllocationIds(0);\n+ assertThat(inSyncSet.size(), equalTo(2));\n+\n+ IndexShardRoutingTable shardRoutingTable = clusterState.routingTable().index(\"test\").shard(0);\n+ ShardRouting primaryShard = shardRoutingTable.primaryShard();\n+ ShardRouting replicaShard = shardRoutingTable.replicaShards().get(0);\n+\n+ logger.info(\"remove replica node\");\n+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())\n+ .remove(replicaShard.currentNodeId()))\n+ .build();\n+ clusterState = allocation.deassociateDeadNodes(clusterState, true, \"reroute\");\n+\n+ // in-sync allocation ids should not be updated\n+ assertEquals(inSyncSet, clusterState.metaData().index(\"test\").inSyncAllocationIds(0));\n+\n+ logger.info(\"fail primary shard\");\n+ clusterState = failedClusterStateTaskExecutor.execute(clusterState, Collections.singletonList(new ShardEntry(\n+ shardRoutingTable.shardId(), primaryShard.allocationId().getId(), 0L, \"dummy\", null))).resultingState;\n+\n+ assertThat(clusterState.routingTable().index(\"test\").shard(0).assignedShards().size(), equalTo(0));\n+ // in-sync allocation ids should not be updated\n+ assertEquals(inSyncSet, clusterState.metaData().index(\"test\").inSyncAllocationIds(0));\n+ }\n+\n private ClusterState createOnePrimaryOneReplicaClusterState(AllocationService allocation) {\n logger.info(\"creating an index with 1 shard, 1 replica\");\n MetaData metaData = MetaData.builder()", "filename": "core/src/test/java/org/elasticsearch/cluster/routing/allocation/InSyncAllocationIdTests.java", "status": "modified" } ] }
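In plain terms, the patch adds two guards: the in-sync set is only trimmed when it has grown past the number of copies that could possibly be active, and a failed primary's allocation id is kept when there is no surviving active copy to promote. A stand-alone sketch of the trimming guard with ordinary sets follows; the method and parameter names are illustrative, not the actual `IndexMetaDataUpdater` API.

```java
import java.util.Set;
import java.util.stream.Collectors;

// Illustrative model of the "trim only when it grows" guard from the patch above;
// plain sets stand in for the cluster-state classes.
public class InSyncTrimming {

    static Set<String> maybeTrim(Set<String> oldInSync, Set<String> newInSync,
                                 Set<String> assignedAllocationIds, int numberOfReplicas) {
        int maxActiveShards = numberOfReplicas + 1; // replicas + 1 primary
        if (newInSync.size() > oldInSync.size() && newInSync.size() > maxActiveShards) {
            // trim entries that no longer have an assigned shard routing
            return newInSync.stream()
                    .filter(assignedAllocationIds::contains)
                    .collect(Collectors.toSet());
        }
        // A set that shrank or stayed the same is left alone, so lowering the replica
        // count while shards are unassigned no longer discards valid copies.
        return newInSync;
    }

    public static void main(String[] args) {
        // replica count dropped to 0 while both copies were unassigned: nothing is trimmed
        Set<String> old = Set.of("primary-id", "replica-id");
        System.out.println(maybeTrim(old, old, Set.of(), 0)); // both ids kept (order may vary)
    }
}
```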
{ "body": "**Elasticsearch version**: master\n\n**Plugins installed**: none\n\n**JVM version**: 1.8.0_102\n\n**OS version**: macOS Sierra\n\n**Description of the problem including expected versus actual behavior**: fields of type `geo_point` are not described in the `_field_stats` output\n\n**Steps to reproduce**:\nsense requests: https://gist.github.com/spalger/72050d99ee9dc5a4ee8d1186e9cc05c9\nwhich results in a matching filter, but no `pin.location` field listed in the field_stats output\n", "comments": [ { "body": "@jpountz could you take a look at this?\n", "created_at": "2016-09-30T09:57:26Z" }, { "body": "The current behaviour of this API is to ignore all fields for which we do not know how to compute stats. Geo points are fixable, but should we add other fields like geo shapes as well as meta fields such as `_index`? Those are currently ignored as well.\n", "created_at": "2016-10-03T15:39:57Z" }, { "body": "My naive opinion is that all values that work as \"field\" identifiers in queries or aggregations should be included in the stats.\n", "created_at": "2016-10-03T19:14:31Z" }, { "body": "This seems to have been fixed by #21947. After that though the field stats API has been removed.", "created_at": "2018-03-16T10:30:14Z" } ], "number": 20707, "title": "Field stats API skips geo_point fields" }
{ "body": "This PR adds a new `GeoPoint` class to `FieldStats` for computing field stats over `geo_point` field types. Integration tests are updated to include testing geo_point types.\r\n\r\nCloses #20707", "number": 21947, "review_comments": [ { "body": "If the field is not indexed we should be able to return some infos (maxDoc and isAggregatable). For other field stats we return null only if the field is not in the fieldinfos of the index. This means that we ignore fields that the index don't know about but we return a limited field stats object when it is present in at least one document. \r\nThis would look like this:\r\n\r\n```\r\n FieldInfo fi = MultiFields.getMergedFieldInfos(reader).fieldInfo(name());\r\n if (fi == null) {\r\n return null;\r\n }\r\n final long size = PointValues.size(reader, field);\r\n if (size == 0) {\r\n return new FieldStats.GeoPoint(reader.maxDoc(), -1L, -1L, -1L, isSearchable(), isAggregatable());\r\n}\r\n...\r\n```", "created_at": "2016-12-07T09:42:45Z" }, { "body": "This function is used to resolve index constraint on field stats. I am not sure if we should implement index constraint on a geopoint field but if we don't then we should throw an exception here. ", "created_at": "2016-12-07T09:47:18Z" } ], "title": "Add geo_point to FieldStats" }
{ "commits": [ { "message": "Add geo_point to FieldStats\n\nThis commit adds a new GeoPoint class to FieldStats for computing field stats over geo_point field types." } ], "files": [ { "diff": "@@ -124,8 +124,10 @@ public String getDisplayType() {\n return \"string\";\n case 4:\n return \"ip\";\n+ case 5:\n+ return \"geo_point\";\n default:\n- throw new IllegalArgumentException(\"Unknown type.\");\n+ throw new IllegalArgumentException(\"Unknown type 1. \" + type);\n }\n }\n \n@@ -276,7 +278,7 @@ public final void accumulate(FieldStats other) {\n }\n }\n \n- private void updateMinMax(T min, T max) {\n+ protected void updateMinMax(T min, T max) {\n if (compare(minValue, min) > 0) {\n minValue = min;\n }\n@@ -643,6 +645,55 @@ public String getMaxValueAsString() {\n }\n }\n \n+ public static class GeoPoint extends FieldStats<org.elasticsearch.common.geo.GeoPoint> {\n+ public GeoPoint(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,\n+ boolean isSearchable, boolean isAggregatable) {\n+ super((byte) 5, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,\n+ isSearchable, isAggregatable);\n+ }\n+\n+ public GeoPoint(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,\n+ boolean isSearchable, boolean isAggregatable,\n+ org.elasticsearch.common.geo.GeoPoint minValue, org.elasticsearch.common.geo.GeoPoint maxValue) {\n+ super((byte) 5, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable,\n+ minValue, maxValue);\n+ }\n+\n+ @Override\n+ public org.elasticsearch.common.geo.GeoPoint valueOf(String value, String fmt) {\n+ return org.elasticsearch.common.geo.GeoPoint.parseFromLatLon(value);\n+ }\n+\n+ @Override\n+ protected void updateMinMax(org.elasticsearch.common.geo.GeoPoint min, org.elasticsearch.common.geo.GeoPoint max) {\n+ minValue.reset(Math.min(min.lat(), minValue.lat()), Math.min(min.lon(), minValue.lon()));\n+ maxValue.reset(Math.max(max.lat(), maxValue.lat()), Math.max(max.lon(), maxValue.lon()));\n+ }\n+\n+ @Override\n+ public int compare(org.elasticsearch.common.geo.GeoPoint p1, org.elasticsearch.common.geo.GeoPoint p2) {\n+ throw new IllegalArgumentException(\"compare is not supported for geo_point field stats\");\n+ }\n+\n+ @Override\n+ public void writeMinMax(StreamOutput out) throws IOException {\n+ out.writeDouble(minValue.lat());\n+ out.writeDouble(minValue.lon());\n+ out.writeDouble(maxValue.lat());\n+ out.writeDouble(maxValue.lon());\n+ }\n+\n+ @Override\n+ public String getMinValueAsString() {\n+ return minValue.toString();\n+ }\n+\n+ @Override\n+ public String getMaxValueAsString() {\n+ return maxValue.toString();\n+ }\n+ }\n+\n public static FieldStats readFrom(StreamInput in) throws IOException {\n byte type = in.readByte();\n long maxDoc = in.readLong();\n@@ -690,7 +741,7 @@ public static FieldStats readFrom(StreamInput in) throws IOException {\n isSearchable, isAggregatable);\n }\n \n- case 4:\n+ case 4: {\n if (hasMinMax == false) {\n return new Ip(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,\n isSearchable, isAggregatable);\n@@ -705,9 +756,19 @@ public static FieldStats readFrom(StreamInput in) throws IOException {\n InetAddress max = InetAddressPoint.decode(b2);\n return new Ip(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,\n isSearchable, isAggregatable, min, max);\n-\n+ }\n+ case 5: {\n+ if (hasMinMax == false) {\n+ return new GeoPoint(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,\n+ isSearchable, isAggregatable);\n+ }\n+ org.elasticsearch.common.geo.GeoPoint min = new 
org.elasticsearch.common.geo.GeoPoint(in.readDouble(), in.readDouble());\n+ org.elasticsearch.common.geo.GeoPoint max = new org.elasticsearch.common.geo.GeoPoint(in.readDouble(), in.readDouble());\n+ return new GeoPoint(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,\n+ isSearchable, isAggregatable, min, max);\n+ }\n default:\n- throw new IllegalArgumentException(\"Unknown type.\");\n+ throw new IllegalArgumentException(\"Unknown type 2. \" + type);\n }\n }\n ", "filename": "core/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java", "status": "modified" }, { "diff": "@@ -22,7 +22,9 @@\n import org.apache.lucene.index.FieldInfo;\n import org.apache.lucene.index.IndexReader;\n import org.apache.lucene.index.IndexableField;\n+import org.apache.lucene.index.Terms;\n import org.apache.lucene.search.Query;\n+import org.apache.lucene.util.NumericUtils;\n import org.elasticsearch.ElasticsearchParseException;\n import org.elasticsearch.action.fieldstats.FieldStats;\n import org.elasticsearch.common.Explicit;\n@@ -173,17 +175,21 @@ public Query termQuery(Object value, QueryShardContext context) {\n }\n \n @Override\n- public FieldStats stats(IndexReader reader) throws IOException {\n- int maxDoc = reader.maxDoc();\n- FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(name());\n+ public FieldStats.GeoPoint stats(IndexReader reader) throws IOException {\n+ String field = name();\n+ FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(field);\n if (fi == null) {\n return null;\n }\n- /**\n- * we don't have a specific type for geo_point so we use an empty {@link FieldStats.Text}.\n- * TODO: we should maybe support a new type that knows how to (de)encode the min/max information\n- */\n- return new FieldStats.Text(maxDoc, -1, -1, -1, isSearchable(), isAggregatable());\n+\n+ Terms terms = org.apache.lucene.index.MultiFields.getTerms(reader, field);\n+ if (terms == null) {\n+ return new FieldStats.GeoPoint(reader.maxDoc(), 0L, -1L, -1L, isSearchable(), isAggregatable());\n+ }\n+ GeoPoint minPt = GeoPoint.fromGeohash(NumericUtils.sortableBytesToLong(terms.getMin().bytes, terms.getMin().offset));\n+ GeoPoint maxPt = GeoPoint.fromGeohash(NumericUtils.sortableBytesToLong(terms.getMax().bytes, terms.getMax().offset));\n+ return new FieldStats.GeoPoint(reader.maxDoc(), terms.getDocCount(), -1L, terms.getSumTotalTermFreq(), isSearchable(),\n+ isAggregatable(), minPt, maxPt);\n }\n }\n ", "filename": "core/src/main/java/org/elasticsearch/index/mapper/BaseGeoPointFieldMapper.java", "status": "modified" }, { "diff": "@@ -21,8 +21,13 @@\n import org.apache.lucene.document.LatLonDocValuesField;\n import org.apache.lucene.document.LatLonPoint;\n import org.apache.lucene.document.StoredField;\n+import org.apache.lucene.geo.GeoEncodingUtils;\n+import org.apache.lucene.index.FieldInfo;\n import org.apache.lucene.index.IndexOptions;\n+import org.apache.lucene.index.IndexReader;\n+import org.apache.lucene.index.PointValues;\n import org.apache.lucene.search.Query;\n+import org.elasticsearch.action.fieldstats.FieldStats;\n import org.elasticsearch.common.Explicit;\n import org.elasticsearch.common.geo.GeoPoint;\n import org.elasticsearch.common.geo.GeoUtils;\n@@ -120,6 +125,26 @@ public Query termQuery(Object value, QueryShardContext context) {\n throw new QueryShardException(context, \"Geo fields do not support exact searching, use dedicated geo queries instead: [\"\n + name() + \"]\");\n }\n+\n+ @Override\n+ public FieldStats.GeoPoint 
stats(IndexReader reader) throws IOException {\n+ String field = name();\n+ FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(name());\n+ if (fi == null) {\n+ return null;\n+ }\n+ final long size = PointValues.size(reader, field);\n+ if (size == 0) {\n+ return new FieldStats.GeoPoint(reader.maxDoc(), -1L, -1L, -1L, isSearchable(), isAggregatable());\n+ }\n+ final int docCount = PointValues.getDocCount(reader, field);\n+ byte[] min = PointValues.getMinPackedValue(reader, field);\n+ byte[] max = PointValues.getMaxPackedValue(reader, field);\n+ GeoPoint minPt = new GeoPoint(GeoEncodingUtils.decodeLatitude(min, 0), GeoEncodingUtils.decodeLongitude(min, Integer.BYTES));\n+ GeoPoint maxPt = new GeoPoint(GeoEncodingUtils.decodeLatitude(max, 0), GeoEncodingUtils.decodeLongitude(max, Integer.BYTES));\n+ return new FieldStats.GeoPoint(reader.maxDoc(), docCount, -1L, size, isSearchable(), isAggregatable(),\n+ minPt, maxPt);\n+ }\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/index/mapper/LatLonPointFieldMapper.java", "status": "modified" }, { "diff": "@@ -27,8 +27,10 @@\n import org.elasticsearch.action.fieldstats.FieldStatsResponse;\n import org.elasticsearch.action.fieldstats.IndexConstraint;\n import org.elasticsearch.action.index.IndexRequestBuilder;\n+import org.elasticsearch.common.geo.GeoPoint;\n import org.elasticsearch.index.cache.request.RequestCacheStats;\n import org.elasticsearch.test.ESIntegTestCase;\n+import org.elasticsearch.test.geo.RandomGeoGenerator;\n \n import java.util.ArrayList;\n import java.util.List;\n@@ -40,6 +42,7 @@\n import static org.elasticsearch.action.fieldstats.IndexConstraint.Property.MIN;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;\n+import static org.hamcrest.Matchers.closeTo;\n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.greaterThan;\n import static org.hamcrest.Matchers.nullValue;\n@@ -60,7 +63,8 @@ public void testRandom() throws Exception {\n \"long\", \"type=long\",\n \"integer\", \"type=integer\",\n \"short\", \"type=short\",\n- \"byte\", \"type=byte\"));\n+ \"byte\", \"type=byte\",\n+ \"location\", \"type=geo_point\"));\n ensureGreen(\"test\");\n \n // index=false\n@@ -74,7 +78,8 @@ public void testRandom() throws Exception {\n \"long\", \"type=long,index=false\",\n \"integer\", \"type=integer,index=false\",\n \"short\", \"type=short,index=false\",\n- \"byte\", \"type=byte,index=false\"\n+ \"byte\", \"type=byte,index=false\",\n+ \"location\", \"type=geo_point,index=false\"\n ));\n ensureGreen(\"test1\");\n \n@@ -89,7 +94,8 @@ public void testRandom() throws Exception {\n \"long\", \"type=long,index=false\",\n \"integer\", \"type=integer,index=false\",\n \"short\", \"type=short,index=false\",\n- \"byte\", \"type=byte,index=false\"\n+ \"byte\", \"type=byte,index=false\",\n+ \"location\", \"type=geo_point,index=false\"\n ));\n ensureGreen(\"test3\");\n \n@@ -107,6 +113,8 @@ public void testRandom() throws Exception {\n double maxFloat = Double.NEGATIVE_INFINITY;\n double minDouble = Double.POSITIVE_INFINITY;\n double maxDouble = Double.NEGATIVE_INFINITY;\n+ GeoPoint minLoc = new GeoPoint(90, 180);\n+ GeoPoint maxLoc = new GeoPoint(-90, -180);\n String minString = new String(Character.toChars(1114111));\n String maxString = \"0\";\n \n@@ -135,6 +143,9 @@ public void testRandom() throws Exception {\n double d = randomDouble();\n 
minDouble = Math.min(minDouble, d);\n maxDouble = Math.max(maxDouble, d);\n+ GeoPoint loc = RandomGeoGenerator.randomPoint(random());\n+ minLoc.reset(Math.min(loc.lat(), minLoc.lat()), Math.min(loc.lon(), minLoc.lon()));\n+ maxLoc.reset(Math.max(loc.lat(), maxLoc.lat()), Math.max(loc.lon(), maxLoc.lon()));\n String str = randomRealisticUnicodeOfLength(3);\n if (str.compareTo(minString) < 0) {\n minString = str;\n@@ -151,14 +162,15 @@ public void testRandom() throws Exception {\n \"half_float\", hf,\n \"float\", f,\n \"double\", d,\n+ \"location\", loc,\n \"string\", str)\n );\n }\n indexRandom(true, false, request);\n \n FieldStatsResponse response = client()\n .prepareFieldStats()\n- .setFields(\"byte\", \"short\", \"integer\", \"long\", \"half_float\", \"float\", \"double\", \"string\").get();\n+ .setFields(\"byte\", \"short\", \"integer\", \"long\", \"half_float\", \"float\", \"double\", \"location\", \"string\").get();\n assertAllSuccessful(response);\n \n for (FieldStats<?> stats : response.getAllFieldStats().values()) {\n@@ -188,6 +200,11 @@ public void testRandom() throws Exception {\n assertThat(response.getAllFieldStats().get(\"double\").getMinValue(), equalTo(minDouble));\n assertThat(response.getAllFieldStats().get(\"double\").getMaxValue(), equalTo(maxDouble));\n assertThat(response.getAllFieldStats().get(\"double\").getDisplayType(), equalTo(\"float\"));\n+ assertThat(((GeoPoint)response.getAllFieldStats().get(\"location\").getMinValue()).lat(), closeTo(minLoc.lat(), 1E-5));\n+ assertThat(((GeoPoint)response.getAllFieldStats().get(\"location\").getMinValue()).lon(), closeTo(minLoc.lon(), 1E-5));\n+ assertThat(((GeoPoint)response.getAllFieldStats().get(\"location\").getMaxValue()).lat(), closeTo(maxLoc.lat(), 1E-5));\n+ assertThat(((GeoPoint)response.getAllFieldStats().get(\"location\").getMaxValue()).lon(), closeTo(maxLoc.lon(), 1E-5));\n+ assertThat(response.getAllFieldStats().get(\"location\").getDisplayType(), equalTo(\"geo_point\"));\n }\n \n public void testFieldStatsIndexLevel() throws Exception {\n@@ -522,6 +539,25 @@ public void testCached() throws Exception {\n assertEquals(oldHitCount, indexStats.getHitCount());\n }\n \n+ public void testGeoPointNotIndexed() throws Exception {\n+ assertAcked(prepareCreate(\"test\").addMapping(\"test\", \"value\", \"type=long\", \"location\", \"type=geo_point,index=no\"));\n+ ensureGreen(\"test\");\n+ client().prepareIndex(\"test\", \"test\").setSource(\"value\", 1L, \"location\", new GeoPoint(32, -132)).get();\n+ client().prepareIndex(\"test\", \"test\").setSource(\"value\", 2L).get();\n+ client().prepareIndex(\"test\", \"test\").setSource(\"value\", 3L).get();\n+ client().prepareIndex(\"test\", \"test\").setSource(\"value\", 4L).get();\n+ refresh();\n+\n+ FieldStatsResponse response = client().prepareFieldStats().setFields(\"value\", \"location\").get();\n+ assertAllSuccessful(response);\n+ assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));\n+ assertThat(response.getAllFieldStats().get(\"location\").getMinValue(), equalTo(null));\n+ assertThat(response.getAllFieldStats().get(\"location\").getMaxValue(), equalTo(null));\n+ assertThat(response.getAllFieldStats().get(\"location\").isAggregatable(), equalTo(true));\n+ assertThat(response.getAllFieldStats().get(\"location\").isSearchable(), equalTo(false));\n+\n+ }\n+\n private void indexRange(String index, long from, long to) throws Exception {\n indexRange(index, \"value\", from, to);\n }", "filename": 
"core/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationIT.java", "status": "modified" }, { "diff": "@@ -34,6 +34,7 @@ setup:\n type: text\n geo:\n type: geo_point\n+ index: no\n geo_shape:\n type: geo_shape\n tree: quadtree\n@@ -51,7 +52,7 @@ setup:\n index: test_2\n type: test\n id: id_10\n- body: { foo: \"babar\", number: 456, bar: \"123\", geo_shape: {type: \"linestring\", coordinates : [[-77.03653, 38.897676], [-77.009051, 38.889939]] } }\n+ body: { foo: \"babar\", number: 456, bar: \"123\", geo: { lat: 48.858093, lon: 2.294694}, geo_shape: {type: \"linestring\", coordinates : [[-77.03653, 38.897676], [-77.009051, 38.889939]] } }\n \n - do:\n indices.refresh: {}\n@@ -84,17 +85,17 @@ setup:\n - is_false: conflicts\n \n ---\n-\"Geopoint field stats\":\n+\"Geo field stats\":\n - skip:\n- version: \" - 5.2.0\"\n+ version: \" - 5.3.0\"\n reason: geo_point fields don't return min/max for versions greater than 5.2.0\n \n - do:\n field_stats:\n fields: [geo, geo_shape]\n \n- - match: { indices._all.fields.geo.type: \"string\" }\n- - match: { indices._all.fields.geo.max_doc: 1 }\n+ - match: { indices._all.fields.geo.type: \"geo_point\" }\n+ - match: { indices._all.fields.geo.max_doc: 2 }\n - match: { indices._all.fields.geo.doc_count: -1 }\n - match: { indices._all.fields.geo.searchable: true }\n - match: { indices._all.fields.geo.aggregatable: true }\n@@ -113,7 +114,6 @@ setup:\n - is_false: indices._all.fields.geo_shape.max_value_as_string\n - is_false: conflicts\n \n-\n ---\n \"Basic field stats with level set to indices\":\n - do:\n@@ -162,9 +162,9 @@ setup:\n \n \n ---\n-\"Geopoint field stats with level set to indices\":\n+\"Geo field stats with level set to indices\":\n - skip:\n- version: \" - 5.2.0\"\n+ version: \" - 5.3.0\"\n reason: geo_point fields don't return min/max for versions greater than 5.2.0\n \n - do:\n@@ -173,15 +173,15 @@ setup:\n level: indices\n \n - match: { indices.test_1.fields.geo.max_doc: 1 }\n- - match: { indices.test_1.fields.geo.doc_count: -1 }\n- - is_false: indices.test_1.fields.geo.min_value\n- - is_false: indices.test_1.fields.geo.max_value\n- - is_false: indices.test_1.fields.geo.min_value_as_string\n- - is_false: indices.test_1.fields.geo.max_value_as_string\n+ - match: { indices.test_1.fields.geo.doc_count: 1 }\n+ - is_true: indices.test_1.fields.geo.min_value\n+ - is_true: indices.test_1.fields.geo.max_value\n+ - is_true: indices.test_1.fields.geo.min_value_as_string\n+ - is_true: indices.test_1.fields.geo.max_value_as_string\n - match: { indices.test_1.fields.geo.searchable: true }\n - match: { indices.test_1.fields.geo.aggregatable: true }\n- - match: { indices.test_1.fields.geo.type: \"string\" }\n- - is_false: indices.test_2.fields.geo\n+ - match: { indices.test_1.fields.geo.type: \"geo_point\" }\n+ - is_true: indices.test_2.fields.geo\n - match: { indices.test_2.fields.geo_shape.max_doc: 1 }\n - match: { indices.test_2.fields.geo_shape.doc_count: -1 }\n - is_false: indices.test_2.fields.geo_shape.min_value\n@@ -191,7 +191,27 @@ setup:\n - match: { indices.test_2.fields.geo_shape.searchable: true }\n - match: { indices.test_2.fields.geo_shape.aggregatable: false }\n - match: { indices.test_2.fields.geo_shape.type: \"string\" }\n- - is_false: indices.test_2.fields.geo\n+ - is_false: conflicts\n+\n+---\n+\"Geopoint field stats\":\n+ - skip:\n+ version: \" - 5.3.0\"\n+ reason: geo_point type not handled for versions earlier than 6.0.0\n+\n+ - do:\n+ field_stats:\n+ fields: [geo]\n+ level: indices\n+\n+ - match: { 
indices.test_2.fields.geo.max_doc: 1 }\n+ - match: { indices.test_2.fields.geo.doc_count: -1 }\n+ - is_false: indices.test_2.fields.geo.min_value\n+ - is_false: indices.test_2.fields.geo.max_value\n+ - match: { indices.test_2.fields.geo.searchable: false }\n+ - match: { indices.test_2.fields.geo.aggregatable: true }\n+ - match: { indices.test_2.fields.geo.type: \"geo_point\" }\n+ - is_true: indices.test_2.fields.geo\n - is_false: conflicts\n \n ---", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/field_stats/10_basics.yaml", "status": "modified" } ] }
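The new `FieldStats.GeoPoint` keeps a running bounding box by taking component-wise minima and maxima of latitude and longitude, as `updateMinMax` and the integration test above do. A self-contained sketch of that accumulation, using a small record rather than the Elasticsearch `GeoPoint` class:

```java
import java.util.List;

// Sketch of the bounding-box accumulation used by the geo_point field stats above;
// a simple record stands in for org.elasticsearch.common.geo.GeoPoint.
public class GeoBoundsSketch {

    record Point(double lat, double lon) {}

    static Point[] accumulate(List<Point> points) {
        // start from the "impossible" extremes, mirroring the integration test above
        double minLat = 90, minLon = 180, maxLat = -90, maxLon = -180;
        for (Point p : points) {
            minLat = Math.min(minLat, p.lat());
            minLon = Math.min(minLon, p.lon());
            maxLat = Math.max(maxLat, p.lat());
            maxLon = Math.max(maxLon, p.lon());
        }
        // note: the min/max corners are component-wise bounds, not indexed points themselves
        return new Point[] { new Point(minLat, minLon), new Point(maxLat, maxLon) };
    }

    public static void main(String[] args) {
        Point[] bounds = accumulate(List.of(new Point(48.85, 2.29), new Point(32.0, -132.0)));
        System.out.println("min=" + bounds[0] + " max=" + bounds[1]);
        // min=Point[lat=32.0, lon=-132.0] max=Point[lat=48.85, lon=2.29]
    }
}
```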
{ "body": "**Elasticsearch version**: 5.0.1 (Elastic Cloud)\r\n**Plugins installed**: [x-pack, ingest-*, analysis-*, etc.]\r\n**JVM version**: (Elastic Cloud)\r\n**OS version**: (Elastic Cloud)\r\n\r\n**Description of the problem including expected versus actual behavior**:\r\nSending a query of the form `GET _mapping/X` returns an error, but probably not the one it should:\r\n\r\n```\r\n{\r\n \"error\": {\r\n \"root_cause\": [\r\n {\r\n \"type\": \"illegal_state_exception\",\r\n \"reason\": \"Failed to close the XContentBuilder\"\r\n }\r\n ],\r\n \"type\": \"illegal_state_exception\",\r\n \"reason\": \"Failed to close the XContentBuilder\",\r\n \"caused_by\": {\r\n \"type\": \"i_o_exception\",\r\n \"reason\": \"Unclosed object or array found\"\r\n }\r\n },\r\n \"status\": 500\r\n}\r\n```", "comments": [ { "body": "I can reproduce this and am investigating.", "created_at": "2016-12-01T21:55:42Z" } ], "number": 21916, "title": "`GET _mapping/X` causes illegal_state_exception and status 500" }
{ "body": "When there are no indexes, get mapping has a series of special cases.\r\nTwo of those expect the response object already started, and the other\r\ntwo respond with an exception. Those two cases (types passed in but no\r\nindexes and vice versa) would fail in their error response generation\r\nbecause it did not expect an object to already be started in the json\r\ngenerator. This change moves the object start to where it is needed for\r\nthe empty responses.\r\n\r\ncloses #21916", "number": 21924, "review_comments": [], "title": "Mappings: Fix get mapping when no indexes exist to not fail in response generation" }
{ "commits": [ { "message": "Mappings: Fix get mapping when no indexes exist to not fail in response generation\n\nWhen there are no indexes, get mapping has a series of special cases.\nTwo of those expect the response object already started, and the other\ntwo respond with an exception. Those two cases (types passed in but no\nindexes and vice versa) would fail in their error response generation\nbecause it did not expect an object to already be started in the json\ngenerator. This change moves the object start to where it is needed for\nthe empty responses.\n\ncloses #21916" } ], "files": [ { "diff": "@@ -66,20 +66,21 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC\n return channel -> client.admin().indices().getMappings(getMappingsRequest, new RestBuilderListener<GetMappingsResponse>(channel) {\n @Override\n public RestResponse buildResponse(GetMappingsResponse response, XContentBuilder builder) throws Exception {\n- builder.startObject();\n+\n ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappingsByIndex = response.getMappings();\n if (mappingsByIndex.isEmpty()) {\n if (indices.length != 0 && types.length != 0) {\n- return new BytesRestResponse(OK, builder.endObject());\n+ return new BytesRestResponse(OK, builder.startObject().endObject());\n } else if (indices.length != 0) {\n return new BytesRestResponse(channel, new IndexNotFoundException(indices[0]));\n } else if (types.length != 0) {\n return new BytesRestResponse(channel, new TypeMissingException(\"_all\", types[0]));\n } else {\n- return new BytesRestResponse(OK, builder.endObject());\n+ return new BytesRestResponse(OK, builder.startObject().endObject());\n }\n }\n \n+ builder.startObject();\n for (ObjectObjectCursor<String, ImmutableOpenMap<String, MappingMetaData>> indexEntry : mappingsByIndex) {\n if (indexEntry.value.isEmpty()) {\n continue;", "filename": "core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java", "status": "modified" }, { "diff": "@@ -17,3 +17,11 @@\n type: not_test_type\n \n - match: { '': {}}\n+\n+---\n+\"Type missing when no types exist\":\n+ - do:\n+ catch: missing\n+ indices.get_mapping:\n+ type: not_test_type\n+ ", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/20_missing_type.yaml", "status": "modified" }, { "diff": "@@ -6,4 +6,10 @@\n index: test_index\n type: not_test_type\n \n+---\n+\"Index missing, no indexes, no types\":\n+ - do:\n+ catch: missing\n+ indices.get_mapping:\n+ index: test_index\n ", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/30_missing_index.yaml", "status": "modified" } ] }
{ "body": "I am using the `_termvectors` API with an artificial document, and I hit an inconsistent behavior, depending on which shard the request hits. Below I attach reproducing steps, but let me first explain what I do: I create an index with two shards and index one document with one field and the value \"one one\". When I request the term vectors of that document with `term_statistics`, I get `ttf=2` as expected. At this point I know that one shard contains the document, and the other shard has no documents at all.\r\n\r\nWhen I submit a `_termvectors` request with an artificial document with the field `text:one`, then if the request hits the shard that holds the indexed document, I get back `ttf=2` which is expected. But if it hits the shard without any documents, it returns `ttf=1` which is confusing.\r\n\r\nMore so, if I send an artificial document with the field `text:two` (NOTE: two does not exist in any of the shards!), then the response is similar -- if the query hits the shard without any documents, it returns `ttf=1`, while if it hits the shard with the one indexed document, it does not return `ttf` at all (which is expected).\r\n\r\nHere are the steps to reproduce:\r\n\r\n**Create the index**\r\n```curl -XPUT 'localhost:9200/tv_bug' -d'\r\n{\r\n \"mappings\": {\r\n \"doc\": {\r\n \"properties\": {\r\n \"text\" : {\r\n \"type\": \"text\"\r\n }\r\n }\r\n }\r\n },\r\n \"settings\" : {\r\n \"index\" : {\r\n \"number_of_shards\" : 2,\r\n \"number_of_replicas\" : 0\r\n }\r\n }\r\n}'\r\n```\r\n\r\n**Index one document**\r\n```\r\ncurl -XPUT 'http://localhost:9200/tv_bug/doc/1?pretty=true' -d '{\r\n \"text\" : \"one one\"\r\n}'\r\n```\r\n\r\n**Verify the document exists in one shard**\r\n```\r\ncurl -XGET 'http://localhost:9200/tv_bug/doc/_search?preference=_shards:0&pretty=true'\r\ncurl -XGET 'http://localhost:9200/tv_bug/doc/_search?preference=_shards:1&pretty=true'\r\n```\r\nOne of those requests will return 0 hits, the other 1 (in my case shard 0 did not return the hit)\r\n\r\n**Get the Term Vectors of that one document**\r\n```\r\ncurl -XGET 'http://localhost:9200/tv_bug/doc/1/_termvectors?pretty=true' -d '{\r\n \"fields\": [\"text\"],\r\n \"term_statistics\" : true\r\n}'\r\n```\r\n\r\n**Get the TV of 'one' using an artificial document**\r\n\r\n***From shard 0***\r\n```\r\ncurl -XGET 'http://localhost:9200/tv_bug/doc/_termvectors?preference=_shards:0&pretty=true' -d '{\r\n \"term_statistics\" : true,\r\n \"doc\" : {\r\n \"text\" : \"one\"\r\n }\r\n}'\r\n```\r\n\r\n***From shard 1***\r\n```\r\ncurl -XGET 'http://localhost:9200/tv_bug/doc/_termvectors?preference=_shards:1&pretty=true' -d '{\r\n \"term_statistics\" : true,\r\n \"doc\" : {\r\n \"text\" : \"one\"\r\n }\r\n}'\r\n```\r\n\r\nIn my case, shard 0 returns `ttf=1` and shard 1 returns `ttf=2` (expected). 
Also, if you send the word \"two\" instead, you will see that shard 1 does not return `ttf` at all (expected), but shard 0 returns `ttf=1`.\r\n\r\nBTW, and this may be a different bug report, when I send the TV requests with the `preference` parameter, I sometimes receive this error:\r\n\r\n```\r\n{\r\n \"error\" : {\r\n \"root_cause\" : [\r\n {\r\n \"type\" : \"remote_transport_exception\",\r\n \"reason\" : \"[C-N1Zbn][127.0.0.1:9300][indices:data/read/tv[s]]\"\r\n }\r\n ],\r\n \"type\" : \"null_pointer_exception\",\r\n \"reason\" : null\r\n },\r\n \"status\" : 500\r\n}\r\n```\r\n\r\nSubmitting the request multiple times eventually succeeds.", "comments": [ { "body": "Oh, forgot to mention that I use 5.0.1.", "created_at": "2016-12-01T10:05:02Z" }, { "body": "Here's the NPE stacktrace from the console BTW:\r\n\r\n```\r\n[2016-12-01T11:59:13,208][WARN ][r.suppressed ] path: /tv_bug/doc/_termvectors, params: {pretty=true, preference=_shards:1, index=tv_bug, type=doc}\r\norg.elasticsearch.transport.RemoteTransportException: [C-N1Zbn][127.0.0.1:9300][indices:data/read/tv[s]]\r\nCaused by: java.lang.NullPointerException\r\n at org.elasticsearch.action.termvectors.TransportTermVectorsAction.shardOperation(TransportTermVectorsAction.java:79) ~[elasticsearch-5.0.1.jar:5.0.1]\r\n at org.elasticsearch.action.termvectors.TransportTermVectorsAction.shardOperation(TransportTermVectorsAction.java:42) ~[elasticsearch-5.0.1.jar:5.0.1]\r\n at org.elasticsearch.action.support.single.shard.TransportSingleShardAction$ShardTransportHandler.messageReceived(TransportSingleShardAction.java:293) ~[elasticsearch-5.0.1.jar:5.0.1]\r\n at org.elasticsearch.action.support.single.shard.TransportSingleShardAction$ShardTransportHandler.messageReceived(TransportSingleShardAction.java:286) ~[elasticsearch-5.0.1.jar:5.0.1]\r\n at org.elasticsearch.transport.TransportRequestHandler.messageReceived(TransportRequestHandler.java:33) ~[elasticsearch-5.0.1.jar:5.0.1]\r\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:69) ~[elasticsearch-5.0.1.jar:5.0.1]\r\n at org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:548) [elasticsearch-5.0.1.jar:5.0.1]\r\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:520) [elasticsearch-5.0.1.jar:5.0.1]\r\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-5.0.1.jar:5.0.1]\r\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_92]\r\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_92]\r\n at java.lang.Thread.run(Thread.java:745) [?:1.8.0_92]\r\n```", "created_at": "2016-12-01T10:46:12Z" }, { "body": "After reviewing ES code, I believe I found the issue, in `TermVectorsWriter.setFields()` (line 72)\r\n```\r\n // if no terms found, take the retrieved term vector fields for stats\r\n if (topLevelTerms == null) {\r\n topLevelTerms = fieldTermVector;\r\n }\r\n```\r\n\r\nSince the shard has no documents indexed, it finds no terms for the artificial document's field, and therefore uses the doc's TV as the terms iterator.\r\n\r\nSo if I send:\r\n\r\n```\r\n$ curl -XGET 'http://localhost:9200/tv_bug/doc/_termvectors?preference=_shards:0&pretty=true' -d '{\r\n \"term_statistics\" : true,\r\n \"doc\" : {\r\n \"text\" : \"one one one\"\r\n }\r\n}'\r\n```\r\n\r\nI get `ttf=3`, which supports the observation above.\r\n\r\nI 
still think it's a bug, in that it's OK to receive the artificial doc's TV, but I don't expect term_statistics to use the doc's stats as what's in the index?", "created_at": "2016-12-01T11:17:19Z" }, { "body": "Hi @shaie \r\n\r\nThanks for the report and for digging into the code to find the issue. Would you be up for sending a PR to fix this?\r\n\r\nAlso, if you don't mind, I'd be interested to hear how what you're using the term vectors API for?", "created_at": "2016-12-01T11:23:49Z" }, { "body": "Thanks for the quick response @clintongormley. As for what I use it for, see this discussion that I started https://discuss.elastic.co/t/terms-stats-api/67508 and this feature request https://github.com/elastic/elasticsearch/issues/21886. Basically I want to get terms statistics (currently for re-ranking capabilities, and also at the moment outside of 'scripting') and the lack of API got me to try TV and artificial documents, where I send the list of terms I wish to get stats for as an artificial document, to all the shards.\r\n\r\nI would love to send a PR, but I'll need to do some work to setup a dev environment. I.e. I don't have an ES fork, not even Gradle installed 😄 , and so I cannot run ES tests, build the code etc. I will start doing that though, cause I wish to be able to contribute to the code (e.g. maybe this terms_stats API), so it seems worth it. In the meanwhile, I think perhaps if you/we implement something like an EmptyTermsEnum which assumes there are no terms to iterate on, and set `topLevelTerms` to such instance, the rest of the code would just work. But, if I want to test it, I need to setup the dev environment...\r\n\r\nAlso, what about that NPE? Do you prefer I report that in a separate bug report?", "created_at": "2016-12-01T12:02:10Z" }, { "body": "> but I'll need to do some work to setup a dev environment. I.e. I don't have an ES fork, not even Gradle installed \r\n\r\njust a few easy clicks away :)\r\n\r\n> Also, what about that NPE? Do you prefer I report that in a separate bug report?\r\n\r\nNo, a drive-by-fix would be fine :)\r\n\r\n", "created_at": "2016-12-01T12:44:36Z" }, { "body": "> just a few easy clicks away :)\r\n\r\nIndeed. I've got my fork and gradle set up. Now figuring out what do I need to run to test it actually works :).\r\n\r\n> No, a drive-by-fix would be fine :)\r\n\r\nOh well, I don't know the cause for that yet ;)", "created_at": "2016-12-01T12:56:04Z" }, { "body": "@clintongormley I pushed a PR #21922 which fixes the term_statistics bug. I haven't yet addressed the NPE bug as I don't yet know what causes it. I think this should go under a separate commit though. Would appreciate your review and feedback on the changes.", "created_at": "2016-12-01T20:32:44Z" }, { "body": "I created #21928 to continue tracking the NPE issue.", "created_at": "2016-12-02T07:50:40Z" } ], "number": 21906, "title": "Bug in _termvectors with artificial document?" }
{ "body": "If you ask for the term vectors of an artificial document with\r\nterm_statistics=true, but a shard does not have any terms of the doc's\r\nfield(s), it returns the doc's term vectors values as the shard-level\r\nterm statistics. This commit fixes that to return 0 for ttf and also\r\nfield-level aggregated statistics.\r\n\r\nThis closes #21906", "number": 21922, "review_comments": [ { "body": "I think you can use Lucene's `TermsEnum.EMPTY` here?", "created_at": "2016-12-01T21:06:41Z" }, { "body": "Hmm how come this iterator even returns any terms?", "created_at": "2016-12-01T21:16:47Z" }, { "body": "That iterator is the TV terms of the artificial document. Since we asked for an AF, we'll get all of its terms.", "created_at": "2016-12-01T21:25:26Z" }, { "body": "OK thanks @shaie that makes sense ... I'll merge.", "created_at": "2016-12-01T21:31:37Z" } ], "title": "Return correct term statistics when a field is not found in a shard" }
{ "commits": [ { "message": "Return correct term statistics whem a field is not found in a shard\n\nIf you ask for the term vectors of an artificial document with\nterm_statistics=true, but a shard does not have any terms of the doc's\nfield(s), it returns the doc's term vectors values as the shard-level\nterm statistics. This commit fixes that to return 0 for ttf and also\nfield-level aggregated statistics.\n\nThis closes #21906" }, { "message": "Replace EMPTY_TERMS_ENUM with TermsEnum.EMPTY" } ], "files": [ { "diff": "@@ -71,7 +71,7 @@ void setFields(Fields termVectorsByField, Set<String> selectedFields, EnumSet<Fl\n \n // if no terms found, take the retrieved term vector fields for stats\n if (topLevelTerms == null) {\n- topLevelTerms = fieldTermVector;\n+ topLevelTerms = EMPTY_TERMS;\n }\n \n TermsEnum topLevelIterator = topLevelTerms.iterator();\n@@ -292,4 +292,18 @@ private void writePotentiallyNegativeVLong(long value) throws IOException {\n // further...\n output.writeVLong(Math.max(0, value + 1));\n }\n+\n+ /** Implements an empty {@link Terms}. */\n+ private static final Terms EMPTY_TERMS = new Terms() {\n+ @Override public TermsEnum iterator() throws IOException { return TermsEnum.EMPTY; }\n+ @Override public long size() throws IOException { return 0; }\n+ @Override public long getSumTotalTermFreq() throws IOException { return 0; }\n+ @Override public long getSumDocFreq() throws IOException { return 0; }\n+ @Override public int getDocCount() throws IOException { return 0; }\n+ @Override public boolean hasFreqs() { return false; }\n+ @Override public boolean hasOffsets() { return false; }\n+ @Override public boolean hasPositions() { return false; }\n+ @Override public boolean hasPayloads() { return false; }\n+ };\n+\n }", "filename": "core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java", "status": "modified" }, { "diff": "@@ -848,6 +848,16 @@ public void testArtificialNoDoc() throws IOException {\n .get();\n assertThat(resp.isExists(), equalTo(true));\n checkBrownFoxTermVector(resp.getFields(), \"field1\", false);\n+\n+ // Since the index is empty, all of artificial document's \"term_statistics\" should be 0/absent\n+ Terms terms = resp.getFields().terms(\"field1\");\n+ assertEquals(\"sumDocFreq should be 0 for a non-existing field!\", 0, terms.getSumDocFreq());\n+ assertEquals(\"sumTotalTermFreq should be 0 for a non-existing field!\", 0, terms.getSumTotalTermFreq());\n+ TermsEnum termsEnum = terms.iterator(); // we're guaranteed to receive terms for that field\n+ while (termsEnum.next() != null) {\n+ String term = termsEnum.term().utf8ToString();\n+ assertEquals(\"term [\" + term + \"] does not exist in the index; ttf should be 0!\", 0, termsEnum.totalTermFreq());\n+ }\n }\n \n public void testPerFieldAnalyzer() throws IOException {", "filename": "core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java", "status": "modified" } ] }
{ "body": "It’s not operate when %java_home% contains space.\r\nlike ‘C:\\Program Files (x86)\\Java\\jre1.8.0_101’\r\n\r\nrelated #20809", "comments": [ { "body": "I signed the CLA now.\n", "created_at": "2016-11-14T07:29:28Z" }, { "body": "I'm not sure, can you please provide clear steps to reproduce? To be clear, in the Windows VM that I test on, the path to `java` contains a space.\n", "created_at": "2016-11-15T22:29:44Z" }, { "body": "![image](https://cloud.githubusercontent.com/assets/2272342/20331600/bc37fd4a-abe9-11e6-9019-581219b1eecf.png)\n(error case - exist path containing 2 spaces without quotation mark)\n('은(는) 예상되지 않았습니다' means 'was unexpected at this time.')\n\n![image](https://cloud.githubusercontent.com/assets/2272342/20331631/f095c2de-abe9-11e6-8b2d-8b8a81079a74.png)\n(error case - not exist path containing 2 spaces without quotation mark)\n\n![image](https://cloud.githubusercontent.com/assets/2272342/20332113/15ada958-abed-11e6-9699-fd58c251ee16.png)\n(not exist path containing 1 space without quotation mark)\n\n![image](https://cloud.githubusercontent.com/assets/2272342/20332136/395d01aa-abed-11e6-8552-30fe322c2ddd.png)\n(not exist path containing 2 spaces with quotation mark)\n\n![image](https://cloud.githubusercontent.com/assets/2272342/20332424/09bc4e0e-abef-11e6-9c5a-cc0d86a29a3f.png)\n(exist path containing 2 spaces with quotation martk but java 1.7)\n\n![image](https://cloud.githubusercontent.com/assets/2272342/20332456/4dc0488a-abef-11e6-9131-524035dea4fc.png)\n(exist path containing 1 space with quotation mark, java 1.8)\n\nI tested several cases for confirmation.\nIt works when java_home path have 1 space. but error occurred when java_home path have 2 spaces.\n", "created_at": "2016-11-16T02:02:36Z" }, { "body": "Thank you for clarifying @yoonkh2000. I looked into this issue, and there is more work needed here. I would like to get a fix in quickly for the next release, so I'm going to open a separate PR and close this one. Thank you for opening this PR and leading the way on this issue.", "created_at": "2016-12-01T20:08:43Z" }, { "body": "I opened #21921.", "created_at": "2016-12-01T20:11:29Z" }, { "body": "Okay. Thanks @jasontedor ", "created_at": "2016-12-02T03:19:21Z" } ], "number": 21525, "title": "On windows, allow java_home to contain spaces" }
{ "body": "This commit fixes the handling of spaces in Windows paths. The current\r\nmechanism works fine in a path that contains a single space, but fails\r\non a path that contains multiple spaces. With this commit, that is no\r\nlonger the case.\r\n\r\nRelates #20809, relates #21525", "number": 21921, "review_comments": [ { "body": "The quotes should surround the entire path, including the .exe.", "created_at": "2016-12-01T22:24:07Z" }, { "body": "The quotes should surround the entire path, including the .exe.", "created_at": "2016-12-01T22:24:15Z" }, { "body": "Why? I tested it with this change. Do you have an example where this would break without your suggestion?", "created_at": "2016-12-01T22:46:10Z" } ], "title": "Fix handling of spaces in Windows paths" }
{ "commits": [ { "message": "Fix handling of spaces in Windows paths\n\nThis commit fixes the handling of spaces in Windows paths. The current\nmechanism works fine in a path that contains a single space, but fails\non a path that contains multiple spaces. With this commit, that is no\nlonger the case." } ], "files": [ { "diff": "@@ -4,20 +4,20 @@ SETLOCAL enabledelayedexpansion\n TITLE Elasticsearch Service ${project.version}\n \n IF DEFINED JAVA_HOME (\n- SET JAVA=%JAVA_HOME%\\bin\\java.exe\n+ SET JAVA=\"%JAVA_HOME%\"\\bin\\java.exe\n ) ELSE (\n FOR %%I IN (java.exe) DO set JAVA=%%~$PATH:I\n )\n-IF NOT EXIST \"%JAVA%\" (\n+IF NOT EXIST %JAVA% (\n ECHO Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME 1>&2\n EXIT /B 1\n )\n IF DEFINED JAVA_HOME GOTO :cont\n \n-IF NOT \"%JAVA:~-13%\" == \"\\bin\\java.exe\" (\n+IF NOT %JAVA:~-13% == \"\\bin\\java.exe\" (\n FOR /f \"tokens=2 delims=[]\" %%I IN ('dir %JAVA%') DO @set JAVA=%%I\n )\n-IF \"%JAVA:~-13%\" == \"\\bin\\java.exe\" (\n+IF %JAVA:~-13% == \"\\bin\\java.exe\" (\n SET JAVA_HOME=%JAVA:~0,-13%\n )\n \n@@ -27,14 +27,14 @@ if not \"%CONF_FILE%\" == \"\" goto conffileset\n set SCRIPT_DIR=%~dp0\n for %%I in (\"%SCRIPT_DIR%..\") do set ES_HOME=%%~dpfI\n \n-\"%JAVA%\" -Xmx50M -version > nul 2>&1\n+%JAVA% -Xmx50M -version > nul 2>&1\n \n if errorlevel 1 (\n \techo Warning: Could not start JVM to detect version, defaulting to x86:\n \tgoto x86\n )\n \n-\"%JAVA%\" -Xmx50M -version 2>&1 | \"%windir%\\System32\\find\" \"64-Bit\" >nul:\n+%JAVA% -Xmx50M -version 2>&1 | \"%windir%\\System32\\find\" \"64-Bit\" >nul:\n \n if errorlevel 1 goto x86\n set EXECUTABLE=%ES_HOME%\\bin\\elasticsearch-service-x64.exe", "filename": "distribution/src/main/resources/bin/elasticsearch-service.bat", "status": "modified" }, { "diff": "@@ -54,6 +54,6 @@ IF ERRORLEVEL 1 (\n \tEXIT /B %ERRORLEVEL%\n )\n \n-\"%JAVA%\" %ES_JAVA_OPTS% %ES_PARAMS% -cp \"%ES_CLASSPATH%\" \"org.elasticsearch.bootstrap.Elasticsearch\" !newparams!\n+%JAVA% %ES_JAVA_OPTS% %ES_PARAMS% -cp \"%ES_CLASSPATH%\" \"org.elasticsearch.bootstrap.Elasticsearch\" !newparams!\n \n ENDLOCAL", "filename": "distribution/src/main/resources/bin/elasticsearch.bat", "status": "modified" }, { "diff": "@@ -1,11 +1,11 @@\n @echo off\n \n IF DEFINED JAVA_HOME (\n- set JAVA=%JAVA_HOME%\\bin\\java.exe\n+ set JAVA=\"%JAVA_HOME%\"\\bin\\java.exe\n ) ELSE (\n FOR %%I IN (java.exe) DO set JAVA=%%~$PATH:I\n )\n-IF NOT EXIST \"%JAVA%\" (\n+IF NOT EXIST %JAVA% (\n ECHO Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME 1>&2\n EXIT /B 1\n )", "filename": "distribution/src/main/resources/bin/elasticsearch.in.bat", "status": "modified" } ] }
{ "body": "Multi-search supports the `max_concurrent_searches` QS parameter, but this is missing from the `/_msearch/template` endpoint:\n\n```\nPUT test/foo/1\n{\"user\":\"john\"}\n\nGET _msearch/template?max_concurrent_searches=1\n{\"index\": \"test\"}\n{\"inline\": {\"query\": {\"match\": {\"user\" : \"{{username}}\" }}}, \"params\": {\"username\": \"john\"}} \n\n```\n\nthrows:\n\n```\n\"request [/_msearch/template] contains unrecognized parameter: [max_concurrent_searches]\"\n```\n", "comments": [ { "body": "Is this issue still there? I have seen the pull request is showing \"This branch has conflicts that must be resolved\". So it means this PR hasn't accepted yet. Would you mind if I taking over and fix the issue there?", "created_at": "2017-03-13T09:24:45Z" }, { "body": "To be honest that PR looks pretty close. @martijnvg, would you like to adopt the PR?", "created_at": "2017-03-13T15:36:35Z" }, { "body": "I can try working on this issue now, would it be okay to assign this to me?", "created_at": "2017-03-13T22:42:39Z" }, { "body": "@nik9000 That PR only adds the setting to the request and request builder. Now I think about this more I think we shouldn't try to duplicate the `max_concurrent_searches` logic in this api, but rather duplicate the templating logic from the search template api. This `max_concurrent_searches` logic has turned out to be [pretty](https://github.com/elastic/elasticsearch/pull/23527) [tricky](https://github.com/elastic/elasticsearch/pull/23538).", "created_at": "2017-03-17T16:13:15Z" } ], "number": 20912, "title": "Add `max_concurrent_searches` to msearch-template" }
{ "body": "As observed by @clintongormley in #20912 max_concurrent_searches is missing from the _msearch/template endpoint. This PR adds the missing parameter.\r\n\r\nCloses #20912\r\n\r\nOne observation: Looking at the code there is quite some duplication between _msearch and _msearch/template due to the latter one having been moved to the mustache module. Not sure if this could be reduced in a follow-up.\r\n\r\n@martijnvg would be great if you could take a look, as you introduced the max_concurrent_searches parameter for _msearch.", "number": 21907, "review_comments": [], "title": "This adds max_concurrent_searches to multi-search-template endpoint." }
{ "commits": [ { "message": "This adds max_concurrent_searches to multi-search-template endpoint.\n\nCloses #20912" } ], "files": [ { "diff": "@@ -34,6 +34,7 @@\n \n public class MultiSearchTemplateRequest extends ActionRequest implements CompositeIndicesRequest {\n \n+ private int maxConcurrentSearchRequests = 0;\n private List<SearchTemplateRequest> requests = new ArrayList<>();\n \n private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed();\n@@ -56,6 +57,26 @@ public MultiSearchTemplateRequest add(SearchTemplateRequest request) {\n return this;\n }\n \n+\n+ /**\n+ * Returns the amount of search requests specified in this multi search requests are allowed to be ran concurrently.\n+ */\n+ public int maxConcurrentSearchRequests() {\n+ return maxConcurrentSearchRequests;\n+ }\n+\n+ /**\n+ * Sets how many search requests specified in this multi search requests are allowed to be ran concurrently.\n+ */\n+ public MultiSearchTemplateRequest maxConcurrentSearchRequests(int maxConcurrentSearchRequests) {\n+ if (maxConcurrentSearchRequests < 1) {\n+ throw new IllegalArgumentException(\"maxConcurrentSearchRequests must be positive\");\n+ }\n+\n+ this.maxConcurrentSearchRequests = maxConcurrentSearchRequests;\n+ return this;\n+ }\n+\n public List<SearchTemplateRequest> requests() {\n return this.requests;\n }\n@@ -90,12 +111,14 @@ public MultiSearchTemplateRequest indicesOptions(IndicesOptions indicesOptions)\n @Override\n public void readFrom(StreamInput in) throws IOException {\n super.readFrom(in);\n+ maxConcurrentSearchRequests = in.readVInt();\n requests = in.readStreamableList(SearchTemplateRequest::new);\n }\n \n @Override\n public void writeTo(StreamOutput out) throws IOException {\n super.writeTo(out);\n+ out.writeVInt(maxConcurrentSearchRequests);\n out.writeStreamableList(requests);\n }\n }", "filename": "modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java", "status": "modified" }, { "diff": "@@ -58,4 +58,12 @@ public MultiSearchTemplateRequestBuilder setIndicesOptions(IndicesOptions indice\n request().indicesOptions(indicesOptions);\n return this;\n }\n+\n+ /**\n+ * Sets how many search requests specified in this multi search requests are allowed to be ran concurrently.\n+ */\n+ public MultiSearchTemplateRequestBuilder setMaxConcurrentSearchRequests(int maxConcurrentSearchRequests) {\n+ request().maxConcurrentSearchRequests(maxConcurrentSearchRequests);\n+ return this;\n+ }\n }", "filename": "modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestBuilder.java", "status": "modified" }, { "diff": "@@ -69,6 +69,10 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client\n public static MultiSearchTemplateRequest parseRequest(RestRequest restRequest, boolean allowExplicitIndex) throws IOException {\n \n MultiSearchTemplateRequest multiRequest = new MultiSearchTemplateRequest();\n+ if (restRequest.hasParam(\"max_concurrent_searches\")) {\n+ multiRequest.maxConcurrentSearchRequests(restRequest.paramAsInt(\"max_concurrent_searches\", 0));\n+ }\n+\n RestMultiSearchAction.parseMultiLineRequest(restRequest, multiRequest.indicesOptions(), allowExplicitIndex,\n (searchRequest, bytes) -> {\n try {", "filename": "modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java", "status": "modified" }, { "diff": "@@ -19,6 +19,7 @@\n \n package org.elasticsearch.script.mustache;\n \n+import 
org.elasticsearch.action.search.MultiSearchRequest;\n import org.elasticsearch.common.bytes.BytesArray;\n import org.elasticsearch.rest.RestRequest;\n import org.elasticsearch.script.ScriptType;\n@@ -68,4 +69,12 @@ public void testParseRequest() throws Exception {\n assertEquals(1, request.requests().get(1).getScriptParams().size());\n assertEquals(1, request.requests().get(2).getScriptParams().size());\n }\n+ \n+ public void testMaxConcurrentSearchRequests() {\n+ MultiSearchRequest request = new MultiSearchRequest();\n+ request.maxConcurrentSearchRequests(randomIntBetween(1, Integer.MAX_VALUE));\n+ expectThrows(IllegalArgumentException.class, () ->\n+ request.maxConcurrentSearchRequests(randomIntBetween(Integer.MIN_VALUE, 0)));\n+ }\n+\n }", "filename": "modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java", "status": "modified" }, { "diff": "@@ -20,6 +20,10 @@\n \"type\" : \"enum\",\n \"options\" : [\"query_then_fetch\", \"query_and_fetch\", \"dfs_query_then_fetch\", \"dfs_query_and_fetch\"],\n \"description\" : \"Search operation type\"\n+ },\n+ \"max_concurrent_searches\" : {\n+ \"type\" : \"number\",\n+ \"description\" : \"Controls the maximum number of concurrent searches the multi search api will execute\"\n }\n }\n },", "filename": "rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json", "status": "modified" } ] }
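The `maxConcurrentSearchRequests(int)` setter added in the diff rejects non-positive values before the request ever reaches the transport layer. Below is a stand-alone restatement of that guarded, chainable setter; only the method name, the zero default, and the error message come from the diff, while the surrounding `ConcurrencyLimitSketch` class is hypothetical.

```java
// Hypothetical restatement of the guarded, chainable setter added in
// MultiSearchTemplateRequest above: reject non-positive limits up front so an
// invalid concurrency value never reaches the transport layer.
public class ConcurrencyLimitSketch {

    private int maxConcurrentSearchRequests = 0; // 0 means "not set, use the default"

    public ConcurrencyLimitSketch maxConcurrentSearchRequests(int maxConcurrentSearchRequests) {
        if (maxConcurrentSearchRequests < 1) {
            throw new IllegalArgumentException("maxConcurrentSearchRequests must be positive");
        }
        this.maxConcurrentSearchRequests = maxConcurrentSearchRequests;
        return this; // chainable, matching the request/builder style in the diff
    }

    public int maxConcurrentSearchRequests() {
        return maxConcurrentSearchRequests;
    }

    public static void main(String[] args) {
        ConcurrencyLimitSketch sketch = new ConcurrencyLimitSketch().maxConcurrentSearchRequests(4);
        System.out.println(sketch.maxConcurrentSearchRequests()); // prints 4
    }
}
```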
{ "body": "I'm unable to pull the segment file sizes using the `/_stats` endpoint as documented [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html). Here's an example of the failing API call:\r\n\r\n```\r\n$ curl -v \"http://127.0.0.1:9200/_stats/segments?include_segment_file_sizes&pretty=true\"\r\n* About to connect() to 127.0.0.1 port 9200 (#0)\r\n* Trying 127.0.0.1...\r\n* Connected to 127.0.0.1 (127.0.0.1) port 9200 (#0)\r\n> GET /_stats/segments?include_segment_file_sizes&pretty=true HTTP/1.1\r\n> User-Agent: curl/7.29.0\r\n> Host: 127.0.0.1:9200\r\n> Accept: */*\r\n> \r\n< HTTP/1.1 400 Bad Request\r\n< content-type: application/json; charset=UTF-8\r\n< content-length: 390\r\n<\r\n{\r\n \"error\" : {\r\n \"root_cause\" : [\r\n {\r\n \"type\" : \"illegal_argument_exception\",\r\n \"reason\" : \"request [/_stats/segments] contains unrecognized parameter: [include_segment_file_sizes]\"\r\n }\r\n ],\r\n \"type\" : \"illegal_argument_exception\",\r\n \"reason\" : \"request [/_stats/segments] contains unrecognized parameter: [include_segment_file_sizes]\"\r\n },\r\n \"status\" : 400\r\n}\r\n* Connection #0 to host 127.0.0.1 left intact\r\n```\r\n\r\nI've found the issue in the REST API handler, and will be submitting a PR to resolve shortly.", "comments": [ { "body": "@spiegela Apologies for leaving this bug report unattended. Currently the request that you were making is working as expected. I will close this issue, since it has been reported a long time and many things have changed in the meantime. Feel free to open another issue if you still manage to reproduce this error on a recent ES version.", "created_at": "2018-03-21T08:30:32Z" }, { "body": "@martijnvg no worries. PR #21879 was merged, and resolved this bug. I think this was only outstanding as a housekeeping issue. I just checked against `6.5` and it still appears to be working 😄 ", "created_at": "2018-04-05T14:40:43Z" } ], "number": 21878, "title": "Unable to view segment file sizes stats" }
{ "body": "Fix for #21878\r\n\r\nAdds `include_segment_file_sizes` to acceptable parameters in the `RestIndicesStatsAction` REST handler.\r\n\r\nThanks!", "number": 21879, "review_comments": [ { "body": "This isn't the right fix, it's not a response parameter (it controls the request that the node client sends). Rather, this parameter needs to be consumed (and parsed as a boolean).", "created_at": "2016-11-30T04:22:28Z" }, { "body": "Gotcha. I see some examples of that now using the `request.paramAsBoolean`. I'll correct it this way, and correct it in the node stats, too.", "created_at": "2016-11-30T05:10:11Z" }, { "body": "Thanks @spiegela. ", "created_at": "2016-11-30T05:12:46Z" }, { "body": "Since we have to specify a default, I think it might be cleaner to just write:\r\n\r\n```diff\r\ndiff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java\r\nindex 9309484..7fee930 100644\r\n--- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java\r\n+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java\r\n@@ -180,8 +180,8 @@ public class RestNodesStatsAction extends BaseRestHandler {\r\n if (nodesStatsRequest.indices().isSet(Flag.Indexing) && (request.hasParam(\"types\"))) {\r\n nodesStatsRequest.indices().types(request.paramAsStringArray(\"types\", null));\r\n }\r\n- if (nodesStatsRequest.indices().isSet(Flag.Segments) && (request.hasParam(\"include_segment_file_sizes\"))) {\r\n- nodesStatsRequest.indices().includeSegmentFileSizes(true);\r\n+ if (nodesStatsRequest.indices().isSet(Flag.Segments)) {\r\n+ nodesStatsRequest.indices().includeSegmentFileSizes(request.paramAsBoolean(\"include_segment_file_sizes\", false));\r\n }\r\n \r\n return channel -> client.admin().cluster().nodesStats(nodesStatsRequest, new NodesResponseRestListener<>(channel));\r\n```", "created_at": "2016-12-01T21:55:57Z" }, { "body": "Same comment here as above.", "created_at": "2016-12-01T21:59:14Z" }, { "body": "Can you fix the grammar here, remove the period, and add a remark that it only applies if segments stats are requested?", "created_at": "2016-12-01T22:02:05Z" } ], "title": "Add support for \"include_segment_file_sizes\" in indices stats REST handler" }
{ "commits": [ { "message": "Add support for include_segment_file_sizes in indices & nodes stats REST handlers\n\n Fixes #21878" } ], "files": [ { "diff": "@@ -34,7 +34,6 @@\n import java.io.IOException;\n import java.util.Collections;\n import java.util.HashMap;\n-import java.util.HashSet;\n import java.util.Locale;\n import java.util.Map;\n import java.util.Set;\n@@ -180,8 +179,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC\n if (nodesStatsRequest.indices().isSet(Flag.Indexing) && (request.hasParam(\"types\"))) {\n nodesStatsRequest.indices().types(request.paramAsStringArray(\"types\", null));\n }\n- if (nodesStatsRequest.indices().isSet(Flag.Segments) && (request.hasParam(\"include_segment_file_sizes\"))) {\n- nodesStatsRequest.indices().includeSegmentFileSizes(true);\n+ if (nodesStatsRequest.indices().isSet(Flag.Segments)) {\n+ nodesStatsRequest.indices().includeSegmentFileSizes(request.paramAsBoolean(\"include_segment_file_sizes\", false));\n }\n \n return channel -> client.admin().cluster().nodesStats(nodesStatsRequest, new NodesResponseRestListener<>(channel));", "filename": "core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java", "status": "modified" }, { "diff": "@@ -37,7 +37,6 @@\n import java.io.IOException;\n import java.util.Collections;\n import java.util.HashMap;\n-import java.util.HashSet;\n import java.util.Locale;\n import java.util.Map;\n import java.util.Set;\n@@ -136,8 +135,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC\n request.paramAsStringArray(\"fielddata_fields\", request.paramAsStringArray(\"fields\", Strings.EMPTY_ARRAY)));\n }\n \n- if (indicesStatsRequest.segments() && request.hasParam(\"include_segment_file_sizes\")) {\n- indicesStatsRequest.includeSegmentFileSizes(true);\n+ if (indicesStatsRequest.segments()) {\n+ indicesStatsRequest.includeSegmentFileSizes(request.paramAsBoolean(\"include_segment_file_sizes\", false));\n }\n \n return channel -> client.admin().indices().stats(indicesStatsRequest, new RestBuilderListener<IndicesStatsResponse>(channel) {", "filename": "core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java", "status": "modified" }, { "diff": "@@ -52,6 +52,11 @@\n \"types\" : {\n \"type\" : \"list\",\n \"description\" : \"A comma-separated list of document types for the `indexing` index metric\"\n+ },\n+ \"include_segment_file_sizes\": {\n+ \"type\": \"boolean\",\n+ \"description\": \"Whether to report the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested)\",\n+ \"default\": false\n }\n }\n },", "filename": "rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json", "status": "modified" }, { "diff": "@@ -63,6 +63,11 @@\n \"timeout\": {\n \"type\" : \"time\",\n \"description\" : \"Explicit operation timeout\"\n+ },\n+ \"include_segment_file_sizes\": {\n+ \"type\": \"boolean\",\n+ \"description\": \"Whether to report the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested)\",\n+ \"default\": false\n }\n }\n },", "filename": "rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json", "status": "modified" }, { "diff": "@@ -121,3 +121,46 @@ setup:\n - is_false: _all.total.translog\n - is_true: _all.total.recovery\n \n+---\n+\"Metric - _all include_segment_file_sizes\":\n+ - do:\n+ indices.stats: { metric: _all, include_segment_file_sizes: true }\n+\n+ - is_true: 
_all.total.docs\n+ - is_true: _all.total.store\n+ - is_true: _all.total.indexing\n+ - is_true: _all.total.get\n+ - is_true: _all.total.search\n+ - is_true: _all.total.merges\n+ - is_true: _all.total.refresh\n+ - is_true: _all.total.flush\n+ - is_true: _all.total.warmer\n+ - is_true: _all.total.query_cache\n+ - is_true: _all.total.fielddata\n+ - is_true: _all.total.completion\n+ - is_true: _all.total.segments\n+ - is_true: _all.total.translog\n+ - is_true: _all.total.recovery\n+ - is_true: _all.total.segments.file_sizes\n+\n+---\n+\"Metric - segments include_segment_file_sizes\":\n+ - do:\n+ indices.stats: { metric: segments, include_segment_file_sizes: true }\n+\n+ - is_false: _all.total.docs\n+ - is_false: _all.total.store\n+ - is_false: _all.total.indexing\n+ - is_false: _all.total.get\n+ - is_false: _all.total.search\n+ - is_false: _all.total.merges\n+ - is_false: _all.total.refresh\n+ - is_false: _all.total.flush\n+ - is_false: _all.total.warmer\n+ - is_false: _all.total.query_cache\n+ - is_false: _all.total.fielddata\n+ - is_false: _all.total.completion\n+ - is_true: _all.total.segments\n+ - is_false: _all.total.translog\n+ - is_false: _all.total.recovery\n+ - is_true: _all.total.segments.file_sizes", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yaml", "status": "modified" }, { "diff": "@@ -0,0 +1,211 @@\n+---\n+\"Metric - blank\":\n+ - do:\n+ cluster.state: {}\n+\n+ - set: { master_node: master }\n+\n+ - do:\n+ nodes.stats: {}\n+\n+ - is_true: nodes.$master.indices.docs\n+ - is_true: nodes.$master.indices.store\n+ - is_true: nodes.$master.indices.indexing\n+ - is_true: nodes.$master.indices.get\n+ - is_true: nodes.$master.indices.search\n+ - is_true: nodes.$master.indices.merges\n+ - is_true: nodes.$master.indices.refresh\n+ - is_true: nodes.$master.indices.flush\n+ - is_true: nodes.$master.indices.warmer\n+ - is_true: nodes.$master.indices.query_cache\n+ - is_true: nodes.$master.indices.fielddata\n+ - is_true: nodes.$master.indices.completion\n+ - is_true: nodes.$master.indices.segments\n+ - is_true: nodes.$master.indices.translog\n+ - is_true: nodes.$master.indices.recovery\n+\n+---\n+\"Metric - _all\":\n+ - do:\n+ cluster.state: {}\n+\n+ - set: { master_node: master }\n+\n+ - do:\n+ nodes.stats: { metric: _all }\n+\n+ - is_true: nodes.$master.indices.docs\n+ - is_true: nodes.$master.indices.store\n+ - is_true: nodes.$master.indices.indexing\n+ - is_true: nodes.$master.indices.get\n+ - is_true: nodes.$master.indices.search\n+ - is_true: nodes.$master.indices.merges\n+ - is_true: nodes.$master.indices.refresh\n+ - is_true: nodes.$master.indices.flush\n+ - is_true: nodes.$master.indices.warmer\n+ - is_true: nodes.$master.indices.query_cache\n+ - is_true: nodes.$master.indices.fielddata\n+ - is_true: nodes.$master.indices.completion\n+ - is_true: nodes.$master.indices.segments\n+ - is_true: nodes.$master.indices.translog\n+ - is_true: nodes.$master.indices.recovery\n+\n+---\n+\"Metric - indices _all\":\n+ - do:\n+ cluster.state: {}\n+\n+ - set: { master_node: master }\n+\n+ - do:\n+ nodes.stats: { metric: indices, index_metric: _all }\n+\n+ - is_true: nodes.$master.indices.docs\n+ - is_true: nodes.$master.indices.store\n+ - is_true: nodes.$master.indices.indexing\n+ - is_true: nodes.$master.indices.get\n+ - is_true: nodes.$master.indices.search\n+ - is_true: nodes.$master.indices.merges\n+ - is_true: nodes.$master.indices.refresh\n+ - is_true: nodes.$master.indices.flush\n+ - is_true: nodes.$master.indices.warmer\n+ - is_true: 
nodes.$master.indices.query_cache\n+ - is_true: nodes.$master.indices.fielddata\n+ - is_true: nodes.$master.indices.completion\n+ - is_true: nodes.$master.indices.segments\n+ - is_true: nodes.$master.indices.translog\n+ - is_true: nodes.$master.indices.recovery\n+\n+---\n+\"Metric - one\":\n+ - do:\n+ cluster.state: {}\n+\n+ - set: { master_node: master }\n+\n+ - do:\n+ nodes.stats: { metric: indices, index_metric: docs }\n+\n+ - is_true: nodes.$master.indices.docs\n+ - is_false: nodes.$master.indices.store\n+ - is_false: nodes.$master.indices.indexing\n+ - is_false: nodes.$master.indices.get\n+ - is_false: nodes.$master.indices.search\n+ - is_false: nodes.$master.indices.merges\n+ - is_false: nodes.$master.indices.refresh\n+ - is_false: nodes.$master.indices.flush\n+ - is_false: nodes.$master.indices.warmer\n+ - is_false: nodes.$master.indices.query_cache\n+ - is_false: nodes.$master.indices.fielddata\n+ - is_false: nodes.$master.indices.completion\n+ - is_false: nodes.$master.indices.segments\n+ - is_false: nodes.$master.indices.translog\n+ - is_false: nodes.$master.indices.recovery\n+\n+---\n+\"Metric - multi\":\n+ - do:\n+ cluster.state: {}\n+\n+ - set: { master_node: master }\n+\n+ - do:\n+ nodes.stats: { metric: indices, index_metric: [ store, get, merge ] }\n+\n+ - is_false: nodes.$master.indices.docs\n+ - is_true: nodes.$master.indices.store\n+ - is_false: nodes.$master.indices.indexing\n+ - is_true: nodes.$master.indices.get\n+ - is_false: nodes.$master.indices.search\n+ - is_true: nodes.$master.indices.merges\n+ - is_false: nodes.$master.indices.refresh\n+ - is_false: nodes.$master.indices.flush\n+ - is_false: nodes.$master.indices.warmer\n+ - is_false: nodes.$master.indices.query_cache\n+ - is_false: nodes.$master.indices.fielddata\n+ - is_false: nodes.$master.indices.completion\n+ - is_false: nodes.$master.indices.segments\n+ - is_false: nodes.$master.indices.translog\n+ - is_false: nodes.$master.indices.recovery\n+\n+\n+---\n+\"Metric - recovery\":\n+ - do:\n+ cluster.state: {}\n+\n+ - set: { master_node: master }\n+\n+ - do:\n+ nodes.stats: { metric: indices, index_metric: [ recovery ] }\n+\n+ - is_false: nodes.$master.indices.docs\n+ - is_false: nodes.$master.indices.store\n+ - is_false: nodes.$master.indices.indexing\n+ - is_false: nodes.$master.indices.get\n+ - is_false: nodes.$master.indices.search\n+ - is_false: nodes.$master.indices.merges\n+ - is_false: nodes.$master.indices.refresh\n+ - is_false: nodes.$master.indices.flush\n+ - is_false: nodes.$master.indices.warmer\n+ - is_false: nodes.$master.indices.query_cache\n+ - is_false: nodes.$master.indices.fielddata\n+ - is_false: nodes.$master.indices.completion\n+ - is_false: nodes.$master.indices.segments\n+ - is_false: nodes.$master.indices.translog\n+ - is_true: nodes.$master.indices.recovery\n+\n+---\n+\"Metric - _all include_segment_file_sizes\":\n+ - do:\n+ cluster.state: {}\n+\n+ - set: { master_node: master }\n+\n+ - do:\n+ nodes.stats: { metric: indices, index_metric: _all, include_segment_file_sizes: true }\n+\n+ - is_true: nodes.$master.indices.docs\n+ - is_true: nodes.$master.indices.store\n+ - is_true: nodes.$master.indices.indexing\n+ - is_true: nodes.$master.indices.get\n+ - is_true: nodes.$master.indices.search\n+ - is_true: nodes.$master.indices.merges\n+ - is_true: nodes.$master.indices.refresh\n+ - is_true: nodes.$master.indices.flush\n+ - is_true: nodes.$master.indices.warmer\n+ - is_true: nodes.$master.indices.query_cache\n+ - is_true: nodes.$master.indices.fielddata\n+ - is_true: 
nodes.$master.indices.completion\n+ - is_true: nodes.$master.indices.segments\n+ - is_true: nodes.$master.indices.translog\n+ - is_true: nodes.$master.indices.recovery\n+ - is_true: nodes.$master.indices.segments.file_sizes\n+\n+---\n+\"Metric - segments include_segment_file_sizes\":\n+ - do:\n+ cluster.state: {}\n+\n+ - set: { master_node: master }\n+\n+ - do:\n+ nodes.stats: { metric: indices, index_metric: segments, include_segment_file_sizes: true }\n+\n+ - is_false: nodes.$master.indices.docs\n+ - is_false: nodes.$master.indices.store\n+ - is_false: nodes.$master.indices.indexing\n+ - is_false: nodes.$master.indices.get\n+ - is_false: nodes.$master.indices.search\n+ - is_false: nodes.$master.indices.merges\n+ - is_false: nodes.$master.indices.refresh\n+ - is_false: nodes.$master.indices.flush\n+ - is_false: nodes.$master.indices.warmer\n+ - is_false: nodes.$master.indices.query_cache\n+ - is_false: nodes.$master.indices.fielddata\n+ - is_false: nodes.$master.indices.completion\n+ - is_true: nodes.$master.indices.segments\n+ - is_false: nodes.$master.indices.translog\n+ - is_false: nodes.$master.indices.recovery\n+ - is_true: nodes.$master.indices.segments.file_sizes\n+", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yaml", "status": "added" } ] }
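As the review comments above point out, the flag has to be consumed and parsed as a boolean with an explicit default rather than merely checked for presence, which also clears the "contains unrecognized parameter" rejection from the original issue. The sketch below mimics that shape with a hypothetical `RequestParams` class; it is not the Elasticsearch `RestRequest` API, just an illustration of "read once, with a default".

```java
import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for a REST request's parameter map, used only to show
// the reviewed pattern: parse the flag once with an explicit default of false
// instead of special-casing its presence.
class RequestParams {
    private final Map<String, String> params = new HashMap<>();

    RequestParams put(String key, String value) {
        params.put(key, value);
        return this;
    }

    boolean paramAsBoolean(String key, boolean defaultValue) {
        String value = params.get(key);
        return value == null ? defaultValue : Boolean.parseBoolean(value);
    }
}

class IncludeSegmentFileSizesExample {
    public static void main(String[] args) {
        RequestParams withFlag = new RequestParams().put("include_segment_file_sizes", "true");
        RequestParams withoutFlag = new RequestParams();

        System.out.println(withFlag.paramAsBoolean("include_segment_file_sizes", false));    // true
        System.out.println(withoutFlag.paramAsBoolean("include_segment_file_sizes", false)); // false
    }
}
```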
{ "body": "I have worked with Search-Templates before. In older versions of ES it worked.\r\nBut now in ES 5.0.1 it is not working anymore.\r\nI'm following the docs here:\r\nhttps://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html#pre-registered-templates\r\n\r\n\r\n\tPOST /_search/template/template1\r\n\t{\r\n\t \"template\": {\r\n\t \"size\": 10,\r\n\t \"query\": {\r\n\t \"multi_match\": {\r\n\t\t\"query\": \"{{query_string}}\",\r\n\t\t\"fields\": [\r\n\t\t \"description\",\r\n\t\t \"title\"\r\n\t\t]\r\n\t }\r\n\t }\r\n\t }\r\n\t}\r\n\r\n\r\n\tGET /_search/template\r\n\t{\r\n\t \"id\": \"template1\", \r\n\t \"params\": {\r\n\t\t\"query_string\": \"search for these words\"\r\n\t }\r\n\t}\r\n\r\nStoring the template returns a success message.\r\n\r\nSearching returns:\r\n\r\n\t{\r\n\t \"error\": {\r\n\t \"root_cause\": [\r\n\t {\r\n\t\t\"type\": \"illegal_state_exception\",\r\n\t\t\"reason\": \"There are no external requests known to support wildcards that don't support replacing their indices\"\r\n\t }\r\n\t ],\r\n\t \"type\": \"illegal_state_exception\",\r\n\t \"reason\": \"There are no external requests known to support wildcards that don't support replacing their indices\"\r\n\t },\r\n\t \"status\": 500\r\n\t}\r\n\r\n\r\nThe .scripts index is empty aswell.\r\n\r\nIs the documentation wrong or is it a bug?", "comments": [ { "body": "@svola you didn't mention that you have x-pack installed. this is a bug with the interaction between search templates and x-pack security. If you specify an index it'll work (eg `GET my_index/_search/template`)\r\n\r\nWe'll work on a fix for the search template API", "created_at": "2016-11-23T13:09:11Z" }, { "body": "@clintongormley Sorry I wasn't aware of x-pack. By specifying the search index it works. Thanks!", "created_at": "2016-11-23T13:16:27Z" }, { "body": "Ok now I tried to run \"render_template\" and it leads to the same issue.\r\nAdding an index before _render didn't help...\r\n\r\n\tGET _search/template/list_main_entity_0.0.1\r\n\r\n\t{\r\n\t \"lang\": \"mustache\",\r\n\t \"_id\": \"list_main_entity_0.0.1\",\r\n\t \"found\": true,\r\n\t \"template\": \"{\\\"size\\\":10,\\\"query\\\":{\\\"multi_match\\\":{\\\"query\\\":\\\"{{query_string}}\\\",\\\"fields\\\":[\\\"description\\\",\\\"title\\\"]}}}\"\r\n\t}\r\n\r\n\r\n\r\n\tGET /_render/template/list_main_entity_0.0.1\r\n\t{\r\n\t \"params\": {\r\n\t \"query_string\" : \"test\"\r\n\t }\r\n\t}\r\n\r\n\r\n\r\n\t{\r\n\t \"error\": {\r\n\t \"root_cause\": [\r\n\t {\r\n\t\t\"type\": \"illegal_state_exception\",\r\n\t\t\"reason\": \"There are no external requests known to support wildcards that don't support replacing their indices\"\r\n\t }\r\n\t ],\r\n\t \"type\": \"illegal_state_exception\",\r\n\t \"reason\": \"There are no external requests known to support wildcards that don't support replacing their indices\"\r\n\t },\r\n\t \"status\": 500\r\n\t}", "created_at": "2016-11-28T10:11:28Z" }, { "body": "yes @svola we are aware of that too. Render doesn't accept indices. You can work around it till a fix is out using the `/_search/template` endpoint with `simulate` set to `true` which does the same as what the render template endpoint does, but also allows setting the indices.", "created_at": "2016-11-28T15:12:54Z" }, { "body": "@javanna Where exactly should that simulate-parameter go? ", "created_at": "2016-11-28T16:12:06Z" }, { "body": "@svola nowhere, I am sorry, I got confused. The issue with the render endpoint cannot be worked around at the moment. 
We just have to release a fix.", "created_at": "2016-11-28T16:22:37Z" }, { "body": "Heads up, we have fixed this bug, this issue will be resolved with the 5.1 release. The render endpoint is problematic and there is no easy fix. As for the search template endpoint, the issue can worked around in 2.4.2 and 5.0.x by either using explicit indices or the deprecated template query as part of the search endpoint (rather than the search template endpoint).", "created_at": "2016-11-30T13:33:04Z" } ], "number": 21747, "title": "Pre-registered indexed search template not working " }
{ "body": "Given that `SearchTemplateRequest` effectively delegates to search when a search is being executed, it should implement the `CompositeIndicesRequest` interface. The `subrequests` method should return a single search request. When a search is not going to be executed, because we are in simulate mode, there are no inner requests, and there are no corresponding indices to that request either.\r\n\r\nCloses #21747", "number": 21865, "review_comments": [], "title": "SearchTemplateRequest to implement CompositeIndicesRequest" }
{ "commits": [ { "message": "SearchTemplateRequest to implement CompositeIndicesRequest\n\nGiven that SearchTemplateRequest effectively delegates to search when a search is being executed, it should implement the CompositeIndicesRequest interface. The subrequests method should return a single search request. When a search is not going to be executed, because we are in simulate mode, there are no inner requests, and there are no corresponding indices to that request either.\n\nCloses #21747" }, { "message": "fix compile error" } ], "files": [ { "diff": "@@ -25,7 +25,7 @@\n * Needs to be implemented by all {@link org.elasticsearch.action.ActionRequest} subclasses that are composed of multiple subrequests\n * which relate to one or more indices. Allows to retrieve those subrequests and reason about them separately. A composite request is\n * executed by its own transport action class (e.g. {@link org.elasticsearch.action.search.TransportMultiSearchAction}), which goes\n- * through all the subrequests and delegates their exection to the appropriate transport action (e.g.\n+ * through all the subrequests and delegates their execution to the appropriate transport action (e.g.\n * {@link org.elasticsearch.action.search.TransportSearchAction}) for each single item.\n */\n public interface CompositeIndicesRequest {", "filename": "core/src/main/java/org/elasticsearch/action/CompositeIndicesRequest.java", "status": "modified" }, { "diff": "@@ -63,7 +63,11 @@ public List<SearchTemplateRequest> requests() {\n \n @Override\n public List<? extends IndicesRequest> subRequests() {\n- return requests;\n+ List<IndicesRequest> indicesRequests = new ArrayList<>();\n+ for (SearchTemplateRequest request : requests) {\n+ indicesRequests.addAll(request.subRequests());\n+ }\n+ return indicesRequests;\n }\n \n @Override", "filename": "modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java", "status": "modified" }, { "diff": "@@ -21,23 +21,24 @@\n \n import org.elasticsearch.action.ActionRequest;\n import org.elasticsearch.action.ActionRequestValidationException;\n+import org.elasticsearch.action.CompositeIndicesRequest;\n import org.elasticsearch.action.IndicesRequest;\n import org.elasticsearch.action.search.SearchRequest;\n-import org.elasticsearch.action.support.IndicesOptions;\n-import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n import org.elasticsearch.script.ScriptType;\n \n import java.io.IOException;\n+import java.util.Collections;\n+import java.util.List;\n import java.util.Map;\n \n import static org.elasticsearch.action.ValidateActions.addValidationError;\n \n /**\n * A request to execute a search based on a search template.\n */\n-public class SearchTemplateRequest extends ActionRequest implements IndicesRequest {\n+public class SearchTemplateRequest extends ActionRequest implements CompositeIndicesRequest {\n \n private SearchRequest request;\n private boolean simulate = false;\n@@ -167,12 +168,13 @@ public void writeTo(StreamOutput out) throws IOException {\n }\n \n @Override\n- public String[] indices() {\n- return request != null ? request.indices() : Strings.EMPTY_ARRAY;\n- }\n-\n- @Override\n- public IndicesOptions indicesOptions() {\n- return request != null ? request.indicesOptions() : SearchRequest.DEFAULT_INDICES_OPTIONS;\n+ public List<? 
extends IndicesRequest> subRequests() {\n+ //if we are simulating no index is involved in the request\n+ if (simulate) {\n+ assert request == null;\n+ return Collections.emptyList();\n+ }\n+ //composite request as it delegates to search, but it holds one single action (search itself)\n+ return Collections.singletonList(request);\n }\n }", "filename": "modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java", "status": "modified" }, { "diff": "@@ -41,16 +41,16 @@ public void testParseRequest() throws Exception {\n assertThat(request.requests().size(), equalTo(3));\n assertThat(request.requests().get(0).getRequest().indices()[0], equalTo(\"test0\"));\n assertThat(request.requests().get(0).getRequest().indices()[1], equalTo(\"test1\"));\n- assertThat(request.requests().get(0).indices(), arrayContaining(\"test0\", \"test1\"));\n+ assertThat(request.requests().get(0).getRequest().indices(), arrayContaining(\"test0\", \"test1\"));\n assertThat(request.requests().get(0).getRequest().requestCache(), equalTo(true));\n assertThat(request.requests().get(0).getRequest().preference(), nullValue());\n- assertThat(request.requests().get(1).indices()[0], equalTo(\"test2\"));\n- assertThat(request.requests().get(1).indices()[1], equalTo(\"test3\"));\n+ assertThat(request.requests().get(1).getRequest().indices()[0], equalTo(\"test2\"));\n+ assertThat(request.requests().get(1).getRequest().indices()[1], equalTo(\"test3\"));\n assertThat(request.requests().get(1).getRequest().types()[0], equalTo(\"type1\"));\n assertThat(request.requests().get(1).getRequest().requestCache(), nullValue());\n assertThat(request.requests().get(1).getRequest().preference(), equalTo(\"_local\"));\n- assertThat(request.requests().get(2).indices()[0], equalTo(\"test4\"));\n- assertThat(request.requests().get(2).indices()[1], equalTo(\"test1\"));\n+ assertThat(request.requests().get(2).getRequest().indices()[0], equalTo(\"test4\"));\n+ assertThat(request.requests().get(2).getRequest().indices()[1], equalTo(\"test1\"));\n assertThat(request.requests().get(2).getRequest().types()[0], equalTo(\"type2\"));\n assertThat(request.requests().get(2).getRequest().types()[1], equalTo(\"type1\"));\n assertThat(request.requests().get(2).getRequest().routing(), equalTo(\"123\"));", "filename": "modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java", "status": "modified" } ] }
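To see the shape of the change above without the surrounding Elasticsearch machinery, here is a pared-down sketch of the composite-request idea: a template request exposes its inner search request via `subRequests()`, and returns an empty list in simulate mode because no search (and therefore no index) is involved. The two interfaces and all class names below are simplified stand-ins, not the real `org.elasticsearch.action` types.

```java
import java.util.Collections;
import java.util.List;

// Simplified stand-ins for the real interfaces, just to show the delegation shape.
interface IndicesRequest {
    String[] indices();
}

interface CompositeIndicesRequest {
    List<? extends IndicesRequest> subRequests();
}

class SimpleSearchRequest implements IndicesRequest {
    private final String[] indices;
    SimpleSearchRequest(String... indices) { this.indices = indices; }
    @Override public String[] indices() { return indices; }
}

class SearchTemplateRequestSketch implements CompositeIndicesRequest {
    private final SimpleSearchRequest inner;
    private final boolean simulate;

    SearchTemplateRequestSketch(SimpleSearchRequest inner, boolean simulate) {
        this.inner = inner;
        this.simulate = simulate;
    }

    @Override
    public List<? extends IndicesRequest> subRequests() {
        if (simulate) {
            // Simulate mode only renders the template: no search runs, no indices involved.
            return Collections.emptyList();
        }
        // Composite in form, but it delegates to exactly one search request.
        return Collections.singletonList(inner);
    }

    public static void main(String[] args) {
        System.out.println(new SearchTemplateRequestSketch(new SimpleSearchRequest("test"), false).subRequests().size()); // 1
        System.out.println(new SearchTemplateRequestSketch(null, true).subRequests().size());                             // 0
    }
}
```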
{ "body": "In it's current implementation, templates used in a rank eval request are executed when parsing the REST request like so:\r\n\r\nhttps://github.com/elastic/elasticsearch/blob/feature/rank-eval/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalSpec.java#L156\r\n\r\nHowever we also allow templates to be set through a Java API call to \"setTemplate\" on that same spec. This doesn't go through template execution so fails further down the line.\r\n\r\nTo make this work, I believe we'd have to move template execution further down, probably to TransportRankEvalAction.\r\n\r\nWith the new Java REST client being refined and improved, I'm wondering if that change is actually worth it: I think, as soon as Java calls also go through REST the above refactoring would be obsolete.\r\n\r\nOpening this issue to track further discussion.", "comments": [ { "body": "@cbuescher @javanna any input from your side welcome.", "created_at": "2016-11-28T10:55:19Z" }, { "body": "Not sure we can yet define apis based on the fact that we will remove transport client at some point, hopefully in 6.0. Maybe it is worth adapting to make this api work with java api too?", "created_at": "2016-11-28T11:20:00Z" }, { "body": "@javanna Makes sense to me.", "created_at": "2016-11-29T12:45:58Z" }, { "body": "Closed by #21855", "created_at": "2016-12-07T10:10:44Z" } ], "number": 21777, "title": "Search templates for rank_eval endpoint currently only work when sent through REST end point" }
{ "body": "Relates to #21777\r\n \r\nSearch templates for rank_eval endpoint so far only worked when sent through\r\nREST end point\r\n \r\nHowever we also allow templates to be set through a Java API call to\r\n\"setTemplate\" on that same spec. This doesn't go through template execution so\r\nfails further down the line.\r\n \r\nTo make this work, moved template execution further down, probably to\r\nTransportRankEvalAction.\r\n\r\n@cbuescher would be nice if you could have a look", "number": 21855, "review_comments": [ { "body": "left over?", "created_at": "2016-12-05T11:14:41Z" }, { "body": "nit: you could use writeOptionalWriteable() here", "created_at": "2016-12-05T11:21:11Z" }, { "body": "nit: possibly use readOptionalWriteable here", "created_at": "2016-12-05T11:21:28Z" }, { "body": "Just a question: this means we are failing the whole rank_eval request here, no? I think this is okay, as long as this is what you intended. ", "created_at": "2016-12-05T11:23:57Z" }, { "body": "I think this should be fine. Not being able to parse the supplied template should be bad enough to fail the whole request.", "created_at": "2016-12-05T14:57:43Z" } ], "title": "Move rank-eval template compilation down to TransportRankEvalAction" }
{ "commits": [ { "message": "Add template IT test for Java API" }, { "message": "Merge branch 'feature/rank-eval' into feature/rank-eval-template-javaapi" }, { "message": "Move rank-eval template compilation down to TransportRankEvalAction\n\nRelates to #21777\n\nSearch templates for rank_eval endpoint so far only worked when sent through\nREST end point\n\nHowever we also allow templates to be set through a Java API call to\n\"setTemplate\" on that same spec. This doesn't go through template execution so\nfails further down the line.\n\nTo make this work, moved template execution further down, probably to\nTransportRankEvalAction." }, { "message": "Merge branch 'feature/rank-eval' into feature/rank-eval-template-javaapi" }, { "message": "Move template compilation to TransportRankEvalAction\n\nRelates to #21777" } ], "files": [ { "diff": "@@ -23,23 +23,17 @@\n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.ParseField;\n import org.elasticsearch.common.ParsingException;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n import org.elasticsearch.common.io.stream.Writeable;\n import org.elasticsearch.common.xcontent.ObjectParser;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n-import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.common.xcontent.XContentParser;\n-import org.elasticsearch.index.query.QueryParseContext;\n import org.elasticsearch.script.Script;\n-import org.elasticsearch.script.ScriptContext;\n-import org.elasticsearch.search.builder.SearchSourceBuilder;\n \n import java.io.IOException;\n import java.util.ArrayList;\n import java.util.Collection;\n-import java.util.Map;\n import java.util.Objects;\n \n /**\n@@ -154,24 +148,7 @@ public Script getTemplate() {\n }\n \n public static RankEvalSpec parse(XContentParser parser, RankEvalContext context, boolean templated) throws IOException {\n- RankEvalSpec spec = PARSER.parse(parser, context);\n-\n- if (templated) {\n- for (RatedRequest query_spec : spec.getSpecifications()) {\n- Map<String, Object> params = query_spec.getParams();\n- Script scriptWithParams = new Script(spec.template.getType(), spec.template.getLang(), spec.template.getIdOrCode(), params);\n- String resolvedRequest = ((BytesReference) (context.getScriptService()\n- .executable(scriptWithParams, ScriptContext.Standard.SEARCH).run())).utf8ToString();\n- try (XContentParser subParser = XContentFactory.xContent(resolvedRequest).createParser(resolvedRequest)) {\n- QueryParseContext parseContext = new QueryParseContext(context.getSearchRequestParsers().queryParsers, subParser,\n- context.getParseFieldMatcher());\n- SearchSourceBuilder templateResult = SearchSourceBuilder.fromXContent(parseContext, context.getAggs(),\n- context.getSuggesters(), context.getSearchExtParsers());\n- query_spec.setTestRequest(templateResult);\n- }\n- }\n- }\n- return spec;\n+ return PARSER.parse(parser, context);\n }\n \n @Override", "filename": "modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalSpec.java", "status": "modified" }, { "diff": "@@ -74,7 +74,8 @@ public RatedRequest(String specId, SearchSourceBuilder testRequest, List<String>\n \n public RatedRequest(StreamInput in) throws IOException {\n this.specId = in.readString();\n- testRequest = new SearchSourceBuilder(in);\n+ testRequest = in.readOptionalWriteable(SearchSourceBuilder::new);\n+\n int indicesSize = 
in.readInt();\n indices = new ArrayList<>(indicesSize);\n for (int i = 0; i < indicesSize; i++) {\n@@ -101,7 +102,8 @@ public RatedRequest(StreamInput in) throws IOException {\n @Override\n public void writeTo(StreamOutput out) throws IOException {\n out.writeString(specId);\n- testRequest.writeTo(out);\n+ out.writeOptionalWriteable(testRequest);\n+\n out.writeInt(indices.size());\n for (String index : indices) {\n out.writeString(index);\n@@ -255,8 +257,9 @@ public static RatedRequest fromXContent(XContentParser parser, RankEvalContext c\n public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {\n builder.startObject();\n builder.field(ID_FIELD.getPreferredName(), this.specId);\n- if (testRequest != null)\n+ if (testRequest != null) {\n builder.field(REQUEST_FIELD.getPreferredName(), this.testRequest);\n+ }\n builder.startObject(PARAMS_FIELD.getPreferredName());\n for (Entry<String, Object> entry : this.params.entrySet()) {\n builder.field(entry.getKey(), entry.getValue());", "filename": "modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java", "status": "modified" }, { "diff": "@@ -26,14 +26,24 @@\n import org.elasticsearch.action.support.HandledTransportAction;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;\n+import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.xcontent.XContentFactory;\n+import org.elasticsearch.common.xcontent.XContentParser;\n+import org.elasticsearch.index.query.QueryParseContext;\n+import org.elasticsearch.script.CompiledScript;\n+import org.elasticsearch.script.ScriptContext;\n+import org.elasticsearch.script.ScriptService;\n import org.elasticsearch.search.SearchHit;\n+import org.elasticsearch.search.SearchRequestParsers;\n import org.elasticsearch.search.builder.SearchSourceBuilder;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.TransportService;\n \n+import java.io.IOException;\n import java.util.Collection;\n+import java.util.HashMap;\n import java.util.List;\n import java.util.Map;\n import java.util.concurrent.ConcurrentHashMap;\n@@ -51,12 +61,17 @@\n * */\n public class TransportRankEvalAction extends HandledTransportAction<RankEvalRequest, RankEvalResponse> {\n private Client client;\n-\n+ private ScriptService scriptService;\n+ private SearchRequestParsers searchRequestParsers;\n+ \n @Inject\n public TransportRankEvalAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters,\n- IndexNameExpressionResolver indexNameExpressionResolver, Client client, TransportService transportService) {\n+ IndexNameExpressionResolver indexNameExpressionResolver, Client client, TransportService transportService,\n+ SearchRequestParsers searchRequestParsers, ScriptService scriptService) {\n super(settings, RankEvalAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,\n RankEvalRequest::new);\n+ this.searchRequestParsers = searchRequestParsers;\n+ this.scriptService = scriptService;\n this.client = client;\n }\n \n@@ -69,10 +84,25 @@ protected void doExecute(RankEvalRequest request, ActionListener<RankEvalRespons\n Map<String, EvalQueryQuality> partialResults = new ConcurrentHashMap<>(specifications.size());\n Map<String, Exception> errors = new ConcurrentHashMap<>(specifications.size());\n \n+ CompiledScript 
scriptWithoutParams = null;\n+ if (qualityTask.getTemplate() != null) {\n+ scriptWithoutParams = scriptService.compile(qualityTask.getTemplate(), ScriptContext.Standard.SEARCH, new HashMap<>());\n+ }\n for (RatedRequest querySpecification : specifications) {\n final RankEvalActionListener searchListener = new RankEvalActionListener(listener, qualityTask.getMetric(), querySpecification,\n partialResults, errors, responseCounter);\n SearchSourceBuilder specRequest = querySpecification.getTestRequest();\n+ if (specRequest == null) {\n+ Map<String, Object> params = querySpecification.getParams();\n+ String resolvedRequest = ((BytesReference) (scriptService.executable(scriptWithoutParams, params).run())).utf8ToString();\n+ try (XContentParser subParser = XContentFactory.xContent(resolvedRequest).createParser(resolvedRequest)) {\n+ QueryParseContext parseContext = new QueryParseContext(searchRequestParsers.queryParsers, subParser, parseFieldMatcher);\n+ specRequest = SearchSourceBuilder.fromXContent(parseContext, searchRequestParsers.aggParsers,\n+ searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers);\n+ } catch (IOException e) {\n+ listener.onFailure(e);\n+ }\n+ }\n List<String> summaryFields = querySpecification.getSummaryFields();\n if (summaryFields.isEmpty()) {\n specRequest.fetchSource(false);", "filename": "modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java", "status": "modified" }, { "diff": "@@ -19,9 +19,9 @@\n \n apply plugin: 'elasticsearch.rest-test'\n \n-/*\n+\n dependencies {\n testCompile project(path: ':modules:rank-eval', configuration: 'runtime')\n testCompile project(path: ':modules:lang-mustache', configuration: 'runtime')\n }\n-*/\n+", "filename": "qa/smoke-test-rank-eval-with-mustache/build.gradle", "status": "modified" }, { "diff": "@@ -0,0 +1,122 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.smoketest;\n+\n+import org.elasticsearch.index.rankeval.Precision;\n+import org.elasticsearch.index.rankeval.RankEvalAction;\n+import org.elasticsearch.index.rankeval.RankEvalPlugin;\n+import org.elasticsearch.index.rankeval.RankEvalRequest;\n+import org.elasticsearch.index.rankeval.RankEvalRequestBuilder;\n+import org.elasticsearch.index.rankeval.RankEvalResponse;\n+import org.elasticsearch.index.rankeval.RankEvalSpec;\n+import org.elasticsearch.index.rankeval.RatedDocument;\n+import org.elasticsearch.index.rankeval.RatedRequest;\n+import org.elasticsearch.plugins.Plugin;\n+import org.elasticsearch.script.Script;\n+import org.elasticsearch.script.ScriptType;\n+import org.elasticsearch.test.ESIntegTestCase;\n+import org.junit.Before;\n+\n+import java.io.IOException;\n+import java.util.ArrayList;\n+import java.util.Arrays;\n+import java.util.Collection;\n+import java.util.HashMap;\n+import java.util.List;\n+import java.util.Map;\n+\n+\n+public class SmokeMultipleTemplatesIT extends ESIntegTestCase {\n+\n+ @Override\n+ protected Collection<Class<? extends Plugin>> transportClientPlugins() {\n+ return Arrays.asList(RankEvalPlugin.class);\n+ }\n+\n+ @Override\n+ protected Collection<Class<? extends Plugin>> nodePlugins() {\n+ return Arrays.asList(RankEvalPlugin.class);\n+ }\n+\n+ @Before\n+ public void setup() {\n+ createIndex(\"test\");\n+ ensureGreen();\n+\n+ client().prepareIndex(\"test\", \"testtype\").setId(\"1\")\n+ .setSource(\"text\", \"berlin\", \"title\", \"Berlin, Germany\").get();\n+ client().prepareIndex(\"test\", \"testtype\").setId(\"2\")\n+ .setSource(\"text\", \"amsterdam\").get();\n+ client().prepareIndex(\"test\", \"testtype\").setId(\"3\")\n+ .setSource(\"text\", \"amsterdam\").get();\n+ client().prepareIndex(\"test\", \"testtype\").setId(\"4\")\n+ .setSource(\"text\", \"amsterdam\").get();\n+ client().prepareIndex(\"test\", \"testtype\").setId(\"5\")\n+ .setSource(\"text\", \"amsterdam\").get();\n+ client().prepareIndex(\"test\", \"testtype\").setId(\"6\")\n+ .setSource(\"text\", \"amsterdam\").get();\n+ refresh();\n+ }\n+\n+ public void testPrecisionAtRequest() throws IOException {\n+ List<String> indices = Arrays.asList(new String[] { \"test\" });\n+ List<String> types = Arrays.asList(new String[] { \"testtype\" });\n+\n+ List<RatedRequest> specifications = new ArrayList<>();\n+ RatedRequest amsterdamRequest = new RatedRequest(\"amsterdam_query\", null, indices, types, createRelevant(\"2\", \"3\", \"4\", \"5\"));\n+ Map<String, Object> ams_params = new HashMap<>();\n+ ams_params.put(\"querystring\", \"amsterdam\");\n+ amsterdamRequest.setParams(ams_params);\n+ specifications.add(amsterdamRequest);\n+\n+ RatedRequest berlinRequest = new RatedRequest(\"berlin_query\", null, indices, types, createRelevant(\"1\"));\n+ Map<String, Object> berlin_params = new HashMap<>();\n+ berlin_params.put(\"querystring\", \"berlin\");\n+ berlinRequest.setParams(berlin_params);\n+ specifications.add(berlinRequest);\n+\n+ Precision metric = new Precision();\n+ RankEvalSpec task = new RankEvalSpec(specifications, metric);\n+ task.setTemplate(\n+ new Script(\n+ ScriptType.INLINE,\n+ \"mustache\", \"{\\\"query\\\": {\\\"match\\\": {\\\"text\\\": \\\"{{querystring}}\\\"}}}\",\n+ new HashMap<>()));\n+\n+ RankEvalRequestBuilder builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest());\n+ 
builder.setRankEvalSpec(task);\n+\n+ RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet();\n+ assertEquals(0.9, response.getQualityLevel(), Double.MIN_VALUE);\n+ }\n+\n+ private static List<RatedDocument> createRelevant(String... docs) {\n+ List<RatedDocument> relevant = new ArrayList<>();\n+ for (String doc : docs) {\n+ relevant.add(new RatedDocument(\"test\", \"testtype\", doc, Rating.RELEVANT.ordinal()));\n+ }\n+ return relevant;\n+ }\n+\n+ public enum Rating {\n+ IRRELEVANT, RELEVANT;\n+ }\n+\n+ }", "filename": "qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/smoketest/SmokeMultipleTemplatesIT.java", "status": "added" } ] }
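For context on the 0.9 asserted at the end of the smoke test above, here is a brief, hedged sanity check. It assumes (this is not stated in the diff) that the reported quality level is the unweighted mean of per-query precision, where precision counts rated-relevant hits over retrieved hits:

```latex
% Assumption: quality level = unweighted mean of per-query precision.
\[
\begin{aligned}
P_{\text{amsterdam}} &= \tfrac{4}{5} = 0.8 && \text{(docs 2--6 retrieved; 2, 3, 4, 5 rated relevant)}\\
P_{\text{berlin}}    &= \tfrac{1}{1} = 1.0 && \text{(only doc 1 retrieved; it is rated relevant)}\\
\text{quality level} &= \tfrac{0.8 + 1.0}{2} = 0.9
\end{aligned}
\]
```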
{ "body": "The Unicast Zen Ping service pings all known nodes every 3 seconds using a light connecting method. For nodes defined in the configuration as unicast hosts and not yet \"found by address\" (meaning that a successful connection has never been established to them) the node is added to a list of nodes to disconnect once the ping is terminated whatever the result of the ping. The round of pings is executed until a master is elected, but if no master can be elected (because of min master nodes or in case of a tribe client node with an offline remote cluster) the pings are executed over and over.\n\nThe thing is that nodes are pinged every `3s` but the connection timeout is configured by default to `30s`. This leads to a situation where many threads are created and added to the generic thread pool in order to disconnect from the node but the disconnect method `TcpTransport.disconnectFromNode(DiscoveryNode node)` blindly tries to acquire a lock on the node even if it will be impossible to disconnect from it (because node is not reachable).  So disconnecting threads are stacked at the rate of 1 every 3sec until the generic thread pool is full.\n\nAdding a check in the `TcpTransport.disconnectFromNode(DiscoveryNode node)` similar to the check done in`disconnectFromNode(DiscoveryNode node, Channel channel, String reason)` avoids threads to block for nothing.\n\nWe could also use a connection timeout of 3s when pinging nodes as it would help to fail connection faster and it would keep the number of blocking threads lower but would not resolve the main issue of threads blocking for nothing.\n\nThis settings can be used to reproduce the issue (check number of threads of generic thread pool):\n\n```\ntribe.t1.cluster.name: \"offline\"\ntribe.t1.discovery.zen.ping.unicast.hosts:\n- '10.10.10.10'\n```\n\nor\n\n```\ndiscovery.zen.minimum_master_nodes: 2\ndiscovery.zen.ping.unicast.hosts:\n- '10.10.10.10'\n```\n\ncloses #19370\n\nI think we have the same issue in 2.x. in `NettyTransport`\n", "comments": [ { "body": "Oh, I did not come up with a good test for this so if anyone has an idea :)\n", "created_at": "2016-08-01T13:16:06Z" }, { "body": "@bleskes or @jasontedor can anyone of you have a look at this? That would be great, thanks!\n", "created_at": "2016-08-01T13:17:09Z" }, { "body": "Thx @tlrx . I left a comment. Regarding testing - you can maybe use MockTransportService and add a connection rules that waits some time before throwing an exception. Another alternative is to implement a MockTcpTransport that behaves as you want. Maybe @jasontedor has a better idea.\n", "created_at": "2016-08-03T10:12:53Z" }, { "body": "@bleskes Thanks for your comments.\n\n> we can not check out of lock. The problem is that we need to make sure that we have a stricit linearization of connection operation.\n\nI thought about this again and I agree the \"fix\" I proposed is not the right thing to do. Like I said in the description of this PR, the threads are piling up again and again because we try to disconnect from a node even if we never succeed to connect to it and that does not make sense. \n\n> I think the right solution here is to add timeouts to the connections done from the pings? maybe an easy way is to have a different connection timeout for \"light\" connections then we do for normal ones. \n\nThat may to fail pings and disconnectings sooner but it won't fix the origin of the issue: we try to disconnect from nodes we never connect to. 
It seems like a waste of resources to me.\n\nI changed the fix to only disconnect from node we successfully connected to (in light mode) and added a test. Please let me know what you think about this change.\n", "created_at": "2016-08-24T10:30:55Z" }, { "body": "@bleskes waiting for your review\n", "created_at": "2016-10-18T08:20:38Z" }, { "body": "@tlrx and I discussed this. While this is a good change, there is still an issue where slow pinging (due to connection timeouts) can cause thread queues to fill up. @tlrx is evaluating the scope of the issue.\n", "created_at": "2016-10-19T10:15:45Z" }, { "body": "Superseded by #22277", "created_at": "2017-01-05T08:41:57Z" } ], "number": 19719, "title": "Avoid zen pinging threads to pile up" }
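The fix described in the last comments boils down to remembering which nodes a light ping actually connected to and only scheduling disconnects for those. A minimal sketch of that bookkeeping, using an illustrative class that is not part of the actual PR:

```java
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative tracker: disconnects are only attempted for nodes that were actually
// connected during a light ping, so no thread blocks trying to release a connection
// that was never established.
public class PingDisconnectTracker {

    private final Set<String> connectedNodeIds = ConcurrentHashMap.newKeySet();

    /** Called when a light connect to a pinged node succeeds. */
    public void onLightConnectSuccess(String nodeId) {
        connectedNodeIds.add(nodeId);
    }

    /** Returns true (and clears the entry) only if a disconnect is actually needed. */
    public boolean shouldDisconnect(String nodeId) {
        return connectedNodeIds.remove(nodeId);
    }
}
```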
{ "body": "Timeouts are global today across all connections this commit allows to specify\r\na connection timeout per node such that depending on the context connections can\r\nbe established with different timeouts.\r\n\r\nRelates to #19719", "number": 21847, "review_comments": [ { "body": "oncewe -> once we", "created_at": "2016-11-29T10:52:47Z" }, { "body": "queu -> queue", "created_at": "2016-11-29T10:53:00Z" }, { "body": "left over", "created_at": "2016-11-29T10:53:46Z" }, { "body": "Could we document where/when this light profile is used? For what I see it's used to connect to nodes on zen pings & transport client sniffer. Also, maybe we could use a lower timeout?", "created_at": "2016-11-29T11:01:29Z" }, { "body": "I think this should be a different change. The documentation is the reference", "created_at": "2016-11-29T13:10:52Z" }, { "body": "true :)", "created_at": "2016-11-29T13:10:57Z" }, { "body": "Okay", "created_at": "2016-11-29T13:26:00Z" }, { "body": "The condition (less than zero) and the message (positive) are at odds with each other. Either the condition should be less than or equal to zero, or the message should say non-negative.", "created_at": "2016-11-30T04:31:09Z" }, { "body": "Can we name the parameter `connectTimeout` (to match the field being set)?", "created_at": "2016-11-30T04:31:33Z" }, { "body": "I like this test, maybe just add an `assumeTrue(Constants.LINUX)` (even then I think we want >= 2.2 kernels for the modern backlog behavior, but that's mostly a given these days anyway so I'm not going to lose sleep over it)?", "created_at": "2016-11-30T04:35:24Z" }, { "body": "it's actually not working on linux but on MacOS and I guess it would work on other BSDs too. On MacOS you see that packages are just dropped if they queue is full\r\n```tcpdump\r\npanthor:elasticsearch simon$ sudo tcpdump -i lo0 port 6660\r\ntcpdump: verbose output suppressed, use -v or -vv for full protocol decode\r\nlistening on lo0, link-type NULL (BSD loopback), capture size 262144 bytes\r\n13:00:50.896080 IP localhost.52585 > localhost.6660: Flags [SEW], seq 3480902841, win 65535, options [mss 16344,nop,wscale 5,nop,nop,TS val 249070543 ecr 0,sackOK,eol], length 0\r\n13:00:50.896175 IP localhost.6660 > localhost.52585: Flags [S.E], seq 684828528, ack 3480902842, win 65535, options [mss 16344,nop,wscale 5,nop,nop,TS val 249070543 ecr 249070543,sackOK,eol], length 0\r\n13:00:50.896192 IP localhost.52585 > localhost.6660: Flags [.], ack 1, win 12759, options [nop,nop,TS val 249070543 ecr 249070543], length 0\r\n13:00:50.896200 IP localhost.6660 > localhost.52585: Flags [.], ack 1, win 12759, options [nop,nop,TS val 249070543 ecr 249070543], length 0\r\n13:00:50.897563 IP localhost.52586 > localhost.6660: Flags [SEW], seq 3356989603, win 65535, options [mss 16344,nop,wscale 5,nop,nop,TS val 249070544 ecr 0,sackOK,eol], length 0\r\n13:00:50.898729 IP localhost.52586 > localhost.6660: Flags [F], seq 3356989603, win 65535, options [nop,nop,TS val 249070545 ecr 0], length 0\r\n13:00:51.096372 IP localhost.6660 > localhost.52585: Flags [R.], seq 1, ack 1, win 12759, length 0\r\n```\r\n\r\nwhile on linux it acks the syn package and establishes the connection:\r\n\r\n```tcpdump\r\nroot@monster:/home/simon# tcpdump -i any port 6660\r\ntcpdump: verbose output suppressed, use -v or -vv for full protocol decode\r\nlistening on any, link-type LINUX_SLL (Linux cooked), capture size 65535 bytes\r\n12:57:56.364461 IP localhost.localdomain.51787 > localhost.localdomain.6660: Flags [S], seq 2528693146, win 
32792, options [mss 16396,sackOK,TS val 364159 ecr 0,nop,wscale 7], length 0\r\n12:57:56.364472 IP localhost.localdomain.6660 > localhost.localdomain.51787: Flags [S.], seq 1501812814, ack 2528693147, win 32768, options [mss 16396,sackOK,TS val 364159 ecr 364159,nop,wscale 7], length 0\r\n12:57:56.364478 IP localhost.localdomain.51787 > localhost.localdomain.6660: Flags [.], ack 1, win 257, options [nop,nop,TS val 364159 ecr 364159], length 0\r\n12:57:56.366587 IP localhost.localdomain.51788 > localhost.localdomain.6660: Flags [S], seq 3949013427, win 32792, options [mss 16396,sackOK,TS val 364159 ecr 0,nop,wscale 7], length 0\r\n12:57:56.366596 IP localhost.localdomain.6660 > localhost.localdomain.51788: Flags [S.], seq 2483339599, ack 3949013428, win 32768, options [mss 16396,sackOK,TS val 364159 ecr 364159,nop,wscale 7], length 0\r\n12:57:56.366602 IP localhost.localdomain.51788 > localhost.localdomain.6660: Flags [.], ack 1, win 257, options [nop,nop,TS val 364159 ecr 364159], length 0\r\n12:57:56.367652 IP localhost.localdomain.6660 > localhost.localdomain.51787: Flags [R.], seq 1, ack 1, win 256, options [nop,nop,TS val 364159 ecr 364159], length 0\r\n12:57:56.367676 IP localhost.localdomain.6660 > localhost.localdomain.51788: Flags [R.], seq 1, ack 1, win 256, options [nop,nop,TS val 364159 ecr 364159], length 0\r\n```\r\n\r\nit's a bit annoying since I think the test is good though... I can just `assumeTrue(MacOS)` for now...", "created_at": "2016-11-30T12:09:32Z" }, { "body": "Yeah, the backlogs work differently on Linux and BSD (BSD has one queue for incomplete and established connections, Linux has two and the backlog only applies to the latter). Can you try setting `/proc/sys/net/ipv4/tcp_abort_on_overflow` to `1` on your Linux machine and see if it behaves as expected? (I don't think we want to expect this to be the case, I'm just curious if it would account for the difference).\r\n\r\nI'm not sure how I feel about this test being macOS only, that means it will not ever run in CI.", "created_at": "2016-11-30T15:28:37Z" }, { "body": "We discussed this offline. I'm good with getting this in as is, and we'll follow up on the status of a Mac in CI.", "created_at": "2016-11-30T16:26:40Z" } ], "title": "Add a connect timeout to the ConnectionProfile to allow per node connect timeouts" }
{ "commits": [ { "message": "Add a connect timeout to the ConnectionProfile to allow per node connect timeouts\n\nTimeouts are global today across all connections this commit allows to specify\na connection timeout per node such that depending on the context connections can\nbe established with different timeouts.\n\nRelates to #19719" }, { "message": "Apply review comments" }, { "message": "nocommit -> note" }, { "message": "apply feedback" }, { "message": "assume we are on BSD" }, { "message": "Merge branch 'master' into timeout_on_connection_profile" }, { "message": "also run test on windows" }, { "message": "Merge branch 'master' into timeout_on_connection_profile" }, { "message": "fix compile errors" } ], "files": [ { "diff": "@@ -18,6 +18,8 @@\n */\n package org.elasticsearch.transport;\n \n+import org.elasticsearch.common.unit.TimeValue;\n+\n import java.util.ArrayList;\n import java.util.Arrays;\n import java.util.Collections;\n@@ -42,14 +44,16 @@ public final class ConnectionProfile {\n TransportRequestOptions.Type.PING,\n TransportRequestOptions.Type.RECOVERY,\n TransportRequestOptions.Type.REG,\n- TransportRequestOptions.Type.STATE))), 1);\n+ TransportRequestOptions.Type.STATE))), 1, null);\n \n private final List<ConnectionTypeHandle> handles;\n private final int numConnections;\n+ private final TimeValue connectTimeout;\n \n- private ConnectionProfile(List<ConnectionTypeHandle> handles, int numConnections) {\n+ private ConnectionProfile(List<ConnectionTypeHandle> handles, int numConnections, TimeValue connectTimeout) {\n this.handles = handles;\n this.numConnections = numConnections;\n+ this.connectTimeout = connectTimeout;\n }\n \n /**\n@@ -59,6 +63,17 @@ public static class Builder {\n private final List<ConnectionTypeHandle> handles = new ArrayList<>();\n private final Set<TransportRequestOptions.Type> addedTypes = EnumSet.noneOf(TransportRequestOptions.Type.class);\n private int offset = 0;\n+ private TimeValue connectTimeout;\n+\n+ /**\n+ * Sets a connect connectTimeout for this connection profile\n+ */\n+ public void setConnectTimeout(TimeValue connectTimeout) {\n+ if (connectTimeout.millis() < 0) {\n+ throw new IllegalArgumentException(\"connectTimeout must be non-negative but was: \" + connectTimeout);\n+ }\n+ this.connectTimeout = connectTimeout;\n+ }\n \n /**\n * Adds a number of connections for one or more types. 
Each type can only be added once.\n@@ -89,8 +104,16 @@ public ConnectionProfile build() {\n if (types.isEmpty() == false) {\n throw new IllegalStateException(\"not all types are added for this connection profile - missing types: \" + types);\n }\n- return new ConnectionProfile(Collections.unmodifiableList(handles), offset);\n+ return new ConnectionProfile(Collections.unmodifiableList(handles), offset, connectTimeout);\n }\n+\n+ }\n+\n+ /**\n+ * Returns the connect timeout or <code>null</code> if no explicit timeout is set on this profile.\n+ */\n+ public TimeValue getConnectTimeout() {\n+ return connectTimeout;\n }\n \n /**", "filename": "core/src/main/java/org/elasticsearch/transport/ConnectionProfile.java", "status": "modified" }, { "diff": "@@ -150,7 +150,6 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i\n private static final long NINETY_PER_HEAP_SIZE = (long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.9);\n private static final int PING_DATA_SIZE = -1;\n \n- protected final TimeValue connectTimeout;\n protected final boolean blockingClient;\n private final CircuitBreakerService circuitBreakerService;\n // package visibility for tests\n@@ -190,9 +189,6 @@ public TcpTransport(String transportName, Settings settings, ThreadPool threadPo\n this.compress = Transport.TRANSPORT_TCP_COMPRESS.get(settings);\n this.networkService = networkService;\n this.transportName = transportName;\n-\n-\n- this.connectTimeout = TCP_CONNECT_TIMEOUT.get(settings);\n this.blockingClient = TCP_BLOCKING_CLIENT.get(settings);\n defaultConnectionProfile = buildDefaultConnectionProfile(settings);\n }\n@@ -204,6 +200,7 @@ static ConnectionProfile buildDefaultConnectionProfile(Settings settings) {\n int connectionsPerNodeState = CONNECTIONS_PER_NODE_STATE.get(settings);\n int connectionsPerNodePing = CONNECTIONS_PER_NODE_PING.get(settings);\n ConnectionProfile.Builder builder = new ConnectionProfile.Builder();\n+ builder.setConnectTimeout(TCP_CONNECT_TIMEOUT.get(settings));\n builder.addConnections(connectionsPerNodeBulk, TransportRequestOptions.Type.BULK);\n builder.addConnections(connectionsPerNodePing, TransportRequestOptions.Type.PING);\n // if we are not master eligible we don't need a dedicated channel to publish the state", "filename": "core/src/main/java/org/elasticsearch/transport/TcpTransport.java", "status": "modified" }, { "diff": "@@ -18,6 +18,7 @@\n */\n package org.elasticsearch.transport;\n \n+import org.elasticsearch.common.unit.TimeValue;\n import org.elasticsearch.test.ESTestCase;\n import org.hamcrest.Matchers;\n \n@@ -27,6 +28,11 @@ public class ConnectionProfileTests extends ESTestCase {\n \n public void testBuildConnectionProfile() {\n ConnectionProfile.Builder builder = new ConnectionProfile.Builder();\n+ TimeValue connectTimeout = TimeValue.timeValueMillis(randomIntBetween(1, 10));\n+ final boolean setConnectTimeout = randomBoolean();\n+ if (setConnectTimeout) {\n+ builder.setConnectTimeout(connectTimeout);\n+ }\n builder.addConnections(1, TransportRequestOptions.Type.BULK);\n builder.addConnections(2, TransportRequestOptions.Type.STATE, TransportRequestOptions.Type.RECOVERY);\n builder.addConnections(3, TransportRequestOptions.Type.PING);\n@@ -39,6 +45,11 @@ public void testBuildConnectionProfile() {\n builder.addConnections(4, TransportRequestOptions.Type.REG);\n ConnectionProfile build = builder.build();\n assertEquals(10, build.getNumConnections());\n+ if (setConnectTimeout) {\n+ assertEquals(connectTimeout, build.getConnectTimeout());\n+ } 
else {\n+ assertNull(build.getConnectTimeout());\n+ }\n Integer[] array = new Integer[10];\n for (int i = 0; i < array.length; i++) {\n array[i] = i;", "filename": "core/src/test/java/org/elasticsearch/transport/ConnectionProfileTests.java", "status": "modified" }, { "diff": "@@ -55,6 +55,7 @@\n import org.elasticsearch.common.settings.Setting.Property;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.unit.ByteSizeValue;\n+import org.elasticsearch.common.unit.TimeValue;\n import org.elasticsearch.common.util.BigArrays;\n import org.elasticsearch.common.util.concurrent.EsExecutors;\n import org.elasticsearch.common.util.concurrent.FutureUtils;\n@@ -204,7 +205,7 @@ private Bootstrap createBootstrap() {\n \n bootstrap.handler(getClientChannelInitializer());\n \n- bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, Math.toIntExact(connectTimeout.millis()));\n+ bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, Math.toIntExact(defaultConnectionProfile.getConnectTimeout().millis()));\n bootstrap.option(ChannelOption.TCP_NODELAY, TCP_NO_DELAY.get(settings));\n bootstrap.option(ChannelOption.SO_KEEPALIVE, TCP_KEEP_ALIVE.get(settings));\n \n@@ -270,7 +271,8 @@ private void createServerBootstrap(String name, Settings settings) {\n logger.debug(\"using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], compress[{}], \"\n + \"connect_timeout[{}], connections_per_node[{}/{}/{}/{}/{}], receive_predictor[{}->{}]\",\n name, workerCount, settings.get(\"port\"), settings.get(\"bind_host\"), settings.get(\"publish_host\"), compress,\n- connectTimeout, defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY),\n+ defaultConnectionProfile.getConnectTimeout(),\n+ defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY),\n defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.BULK),\n defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.REG),\n defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.STATE),\n@@ -343,7 +345,18 @@ protected NodeChannels connectToChannels(DiscoveryNode node, ConnectionProfile p\n final NodeChannels nodeChannels = new NodeChannels(channels, profile);\n boolean success = false;\n try {\n- int numConnections = channels.length;\n+ final int numConnections = channels.length;\n+ final TimeValue connectTimeout;\n+ final Bootstrap bootstrap;\n+ final TimeValue defaultConnectTimeout = defaultConnectionProfile.getConnectTimeout();\n+ if (profile.getConnectTimeout() != null && profile.getConnectTimeout().equals(defaultConnectTimeout) == false) {\n+ bootstrap = this.bootstrap.clone(this.bootstrap.config().group());\n+ bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, Math.toIntExact(profile.getConnectTimeout().millis()));\n+ connectTimeout = profile.getConnectTimeout();\n+ } else {\n+ connectTimeout = defaultConnectTimeout;\n+ bootstrap = this.bootstrap;\n+ }\n final ArrayList<ChannelFuture> connections = new ArrayList<>(numConnections);\n final InetSocketAddress address = node.getAddress().address();\n for (int i = 0; i < numConnections; i++) {", "filename": "modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java", "status": "modified" }, { "diff": "@@ -21,6 +21,7 @@\n \n import org.apache.logging.log4j.message.ParameterizedMessage;\n import org.apache.logging.log4j.util.Supplier;\n+import org.apache.lucene.util.Constants;\n import 
org.elasticsearch.ExceptionsHelper;\n import org.elasticsearch.Version;\n import org.elasticsearch.action.ActionListenerResponseHandler;\n@@ -44,7 +45,12 @@\n import org.junit.Before;\n \n import java.io.IOException;\n+import java.net.InetAddress;\n+import java.net.InetSocketAddress;\n+import java.net.ServerSocket;\n+import java.sql.Time;\n import java.util.ArrayList;\n+import java.util.Collections;\n import java.util.HashMap;\n import java.util.List;\n import java.util.Map;\n@@ -1721,4 +1727,46 @@ public void testRegisterHandlerTwice() {\n serviceA.registerRequestHandler(\"action1\", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC),\n (request, message) -> {throw new AssertionError(\"boom\");});\n }\n+\n+ public void testTimeoutPerConnection() throws IOException {\n+ assumeTrue(\"Works only on BSD network stacks and apparently windows\",\n+ Constants.MAC_OS_X || Constants.FREE_BSD || Constants.WINDOWS);\n+ try (ServerSocket socket = new ServerSocket()) {\n+ // note - this test uses backlog=1 which is implementation specific ie. it might not work on some TCP/IP stacks\n+ // on linux (at least newer ones) the listen(addr, backlog=1) should just ignore new connections if the queue is full which\n+ // means that once we received an ACK from the client we just drop the packet on the floor (which is what we want) and we run\n+ // into a connection timeout quickly. Yet other implementations can for instance can terminate the connection within the 3 way\n+ // handshake which I haven't tested yet.\n+ socket.bind(new InetSocketAddress(InetAddress.getLocalHost(), 0), 1);\n+ socket.setReuseAddress(true);\n+ DiscoveryNode first = new DiscoveryNode(\"TEST\", new TransportAddress(socket.getInetAddress(),\n+ socket.getLocalPort()), emptyMap(),\n+ emptySet(), version0);\n+ DiscoveryNode second = new DiscoveryNode(\"TEST\", new TransportAddress(socket.getInetAddress(),\n+ socket.getLocalPort()), emptyMap(),\n+ emptySet(), version0);\n+ ConnectionProfile.Builder builder = new ConnectionProfile.Builder();\n+ builder.addConnections(1,\n+ TransportRequestOptions.Type.BULK,\n+ TransportRequestOptions.Type.PING,\n+ TransportRequestOptions.Type.RECOVERY,\n+ TransportRequestOptions.Type.REG,\n+ TransportRequestOptions.Type.STATE);\n+\n+ // connection with one connection and a large timeout -- should consume the one spot in the backlog queue\n+ serviceA.connectToNode(first, builder.build());\n+ builder.setConnectTimeout(TimeValue.timeValueMillis(1));\n+ final ConnectionProfile profile = builder.build();\n+ // now with the 1ms timeout we got and test that is it's applied\n+ long startTime = System.nanoTime();\n+ ConnectTransportException ex = expectThrows(ConnectTransportException.class, () -> {\n+ serviceA.connectToNode(second, profile);\n+ });\n+ final long now = System.nanoTime();\n+ final long timeTaken = TimeValue.nsecToMSec(now - startTime);\n+ assertTrue(\"test didn't timeout quick enough, time taken: [\" + timeTaken + \"]\",\n+ timeTaken < TimeValue.timeValueSeconds(5).millis());\n+ assertEquals(ex.getMessage(), \"[][\" + second.getAddress() + \"] connect_timeout[1ms]\");\n+ }\n+ }\n }", "filename": "test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java", "status": "modified" }, { "diff": "@@ -30,6 +30,7 @@\n import org.elasticsearch.common.network.NetworkService;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.unit.ByteSizeValue;\n+import org.elasticsearch.common.unit.TimeValue;\n import 
org.elasticsearch.common.util.BigArrays;\n import org.elasticsearch.common.util.CancellableThreads;\n import org.elasticsearch.common.util.concurrent.AbstractRunnable;\n@@ -46,6 +47,7 @@\n import java.net.ServerSocket;\n import java.net.Socket;\n import java.net.SocketException;\n+import java.net.SocketTimeoutException;\n import java.util.List;\n import java.util.Map;\n import java.util.concurrent.ConcurrentHashMap;\n@@ -178,7 +180,13 @@ protected NodeChannels connectToChannels(DiscoveryNode node, ConnectionProfile p\n final InetSocketAddress address = node.getAddress().address();\n // we just use a single connections\n configureSocket(socket);\n- socket.connect(address, (int) TCP_CONNECT_TIMEOUT.get(settings).millis());\n+ final TimeValue connectTimeout = profile.getConnectTimeout() == null ? defaultConnectionProfile.getConnectTimeout()\n+ : profile.getConnectTimeout();\n+ try {\n+ socket.connect(address, Math.toIntExact(connectTimeout.millis()));\n+ } catch (SocketTimeoutException ex) {\n+ throw new ConnectTransportException(node, \"connect_timeout[\" + connectTimeout + \"]\", ex);\n+ }\n MockChannel channel = new MockChannel(socket, address, \"none\", onClose);\n channel.loopRead(executor);\n mockChannels[0] = channel;", "filename": "test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java", "status": "modified" } ] }
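For reference, a minimal sketch of how the per-node connect timeout added here could be used. It relies only on the `ConnectionProfile.Builder` API visible in the diff above; the wrapping class and method names are illustrative:

```java
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.transport.ConnectionProfile;
import org.elasticsearch.transport.TransportRequestOptions;

public class QuickConnectProfiles {

    /**
     * Builds a profile with one connection per channel type (mirroring the test in the diff)
     * and a short per-profile connect timeout instead of the global transport default.
     */
    public static ConnectionProfile quickConnectProfile() {
        ConnectionProfile.Builder builder = new ConnectionProfile.Builder();
        builder.addConnections(1,
            TransportRequestOptions.Type.BULK,
            TransportRequestOptions.Type.PING,
            TransportRequestOptions.Type.RECOVERY,
            TransportRequestOptions.Type.REG,
            TransportRequestOptions.Type.STATE);
        // falls back to the default profile's timeout when left unset
        builder.setConnectTimeout(TimeValue.timeValueSeconds(3));
        return builder.build();
    }
}
```

The test in the diff passes such a profile to `TransportService#connectToNode`, which is where the shorter timeout takes effect.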
{ "body": "On 5.0.1:\r\n\r\n```\r\nPUT t \r\n{\r\n \"mappings\": {\r\n \"t\": {\r\n \"properties\": {\r\n \"foo\": {\r\n \"type\": \"nested\"\r\n }\r\n }\r\n }\r\n }\r\n}\r\n\r\nPUT t/t/1\r\n{\r\n \"foo.bar\": 5\r\n}\r\n\r\nPUT t/t/2\r\n{\r\n \"foo\": {\"bar\": 5}\r\n}\r\n\r\n```\r\n\r\nReturns Doc 1\r\n```\r\nGET t/_search\r\n{\r\n \"query\": {\r\n \"match\": {\r\n \"foo.bar\": 5\r\n }\r\n }\r\n}\r\n```\r\n\r\nReturns Doc 2\r\n```\r\nGET t/_search\r\n{\r\n \"query\": {\r\n \"nested\": {\r\n \"path\": \"foo\",\r\n \"query\": {\r\n \"match\": {\r\n \"foo.bar\": 5\r\n }\r\n }\r\n }\r\n }\r\n}\r\n\r\n```\r\n", "comments": [ { "body": "I don't like the ambiguity that exists with dots in field names and nested mappings. For instance the below document:\r\n\r\n```\r\n{\r\n \"foo.bar\": 3,\r\n \"foo.baz\": 42\r\n}\r\n```\r\n\r\ncould be both equivalent to\r\n\r\n```\r\n{\r\n \"foo\": {\r\n \"bar\": 3,\r\n \"baz\": 42\r\n }\r\n}\r\n```\r\n\r\nand\r\n\r\n```\r\n{\r\n \"foo\": [\r\n { \"bar\": 3 },\r\n { \"baz\": 42 }\r\n ]\r\n}\r\n```\r\n\r\nwhich are indexed differently if `foo` is a nested object.\r\n\r\nSo should we reject dots in field names when the dot is a delimitation for a nested object?", "created_at": "2016-11-22T15:37:45Z" }, { "body": "> So should we reject dots in field names when the dot is a delimitation for a nested object?\r\n\r\nI think we should. How do we do this in a way that doesn't break behaviour in a minor version?", "created_at": "2016-11-24T15:45:52Z" }, { "body": "> How do we do this in a way that doesn't break behaviour in a minor version?\r\n\r\nThe bug fix consists of raising an error, so I don't think there is any way to fix the bug and not break in a minor release at the same time. I see this bug as an index corruption since fields do not go to the intended document so I would vote to raise an error in all cases rather than trying to keep the bug for old indices.", "created_at": "2016-11-24T18:03:26Z" }, { "body": "It might be, that we're hitting this issue in ElasticSearch 2.4.2, as well. In our case, we use fields whose names have dots in them. We're running queries on them with range filters inside a boolean filter and the query returns documents that have values outside the defined range. Does this bug pertain the issue I'm seeing as well?", "created_at": "2016-11-29T11:56:54Z" }, { "body": "@Thermi hard to say - i'd open a new issue providing a full curl recreation of what you're seeing and we can go from there", "created_at": "2016-11-29T12:52:01Z" }, { "body": "Please disregard the sentence about dots in field names. That's not the case. I'll open a new issue on this repo for the problem I'm seeing.", "created_at": "2016-11-30T13:54:06Z" } ], "number": 21726, "title": "Dots in field names doesn't work with nested fields" }
{ "body": "Closes #21726", "number": 21787, "review_comments": [], "title": "Fail to index fields with dots in field names when one of the intermediate objects is nested." }
{ "commits": [ { "message": "Fail to index fields with dots in field names when one of the intermediate objects is nested.\n\nCloses #21726" }, { "message": "iter" }, { "message": "iter" } ], "files": [ { "diff": "@@ -859,7 +859,8 @@ private static Tuple<Integer, ObjectMapper> getDynamicParentMapper(ParseContext\n Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());\n mapper = (ObjectMapper) builder.build(builderContext);\n if (mapper.nested() != ObjectMapper.Nested.NO) {\n- throw new MapperParsingException(\"It is forbidden to create dynamic nested objects ([\" + context.path().pathAsText(paths[i]) + \"]) through `copy_to`\");\n+ throw new MapperParsingException(\"It is forbidden to create dynamic nested objects ([\" + context.path().pathAsText(paths[i])\n+ + \"]) through `copy_to` or dots in field names\");\n }\n context.addDynamicMapper(mapper);\n break;\n@@ -909,6 +910,11 @@ static Mapper getMapper(ObjectMapper objectMapper, String fieldName) {\n return null;\n }\n objectMapper = (ObjectMapper)mapper;\n+ if (objectMapper.nested().isNested()) {\n+ throw new MapperParsingException(\"Cannot add a value for field [\"\n+ + fieldName + \"] since one of the intermediate objects is mapped as a nested object: [\"\n+ + mapper.name() + \"]\");\n+ }\n }\n return objectMapper.getMapper(subfields[subfields.length - 1]);\n }", "filename": "core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java", "status": "modified" }, { "diff": "@@ -120,6 +120,51 @@ public void testDotsWithExistingMapper() throws Exception {\n assertEquals(\"789\", values[2]);\n }\n \n+ public void testDotsWithExistingNestedMapper() throws Exception {\n+ DocumentMapperParser mapperParser = createIndex(\"test\").mapperService().documentMapperParser();\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\").startObject(\"properties\")\n+ .startObject(\"foo\").field(\"type\", \"nested\").startObject(\"properties\")\n+ .startObject(\"bar\").field(\"type\", \"integer\")\n+ .endObject().endObject().endObject().endObject().endObject().endObject().string();\n+ DocumentMapper mapper = mapperParser.parse(\"type\", new CompressedXContent(mapping));\n+\n+ BytesReference bytes = XContentFactory.jsonBuilder()\n+ .startObject()\n+ .field(\"foo.bar\", 123)\n+ .endObject().bytes();\n+ MapperParsingException e = expectThrows(MapperParsingException.class,\n+ () -> mapper.parse(\"test\", \"type\", \"1\", bytes));\n+ assertEquals(\n+ \"Cannot add a value for field [foo.bar] since one of the intermediate objects is mapped as a nested object: [foo]\",\n+ e.getMessage());\n+ }\n+\n+ public void testDotsWithDynamicNestedMapper() throws Exception {\n+ DocumentMapperParser mapperParser = createIndex(\"test\").mapperService().documentMapperParser();\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startArray(\"dynamic_templates\")\n+ .startObject()\n+ .startObject(\"objects_as_nested\")\n+ .field(\"match_mapping_type\", \"object\")\n+ .startObject(\"mapping\")\n+ .field(\"type\", \"nested\")\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .endArray().endObject().endObject().string();\n+ DocumentMapper mapper = mapperParser.parse(\"type\", new CompressedXContent(mapping));\n+\n+ BytesReference bytes = XContentFactory.jsonBuilder()\n+ .startObject()\n+ .field(\"foo.bar\",42)\n+ .endObject().bytes();\n+ MapperParsingException e = expectThrows(MapperParsingException.class,\n+ () -> mapper.parse(\"test\", \"type\", \"1\", 
bytes));\n+ assertEquals(\n+ \"It is forbidden to create dynamic nested objects ([foo]) through `copy_to` or dots in field names\",\n+ e.getMessage());\n+ }\n+\n public void testPropagateDynamicWithExistingMapper() throws Exception {\n DocumentMapperParser mapperParser = createIndex(\"test\").mapperService().documentMapperParser();\n String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")", "filename": "core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java", "status": "modified" } ] }
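A simplified, standalone sketch of the check this PR adds in `DocumentParser#getMapper`: when a dotted field name is resolved against existing mappings, each intermediate object is inspected and parsing fails if one of them is nested. The tiny mapper model below is illustrative; only the control flow and the error message mirror the diff:

```java
import java.util.Collections;
import java.util.Map;

public class DottedFieldNestedCheck {

    /** Minimal stand-in for an object mapper: it may be nested and may have sub-objects. */
    static final class ObjMapper {
        final boolean nested;
        final Map<String, ObjMapper> children;

        ObjMapper(boolean nested, Map<String, ObjMapper> children) {
            this.nested = nested;
            this.children = children;
        }
    }

    /**
     * Walks the intermediate objects of a dotted field name ("foo.bar" -> "foo") and fails
     * if one of them is mapped as nested, mirroring the MapperParsingException in the diff.
     */
    static void checkPath(ObjMapper root, String fieldName) {
        final String[] parts = fieldName.split("\\.");
        ObjMapper current = root;
        for (int i = 0; i < parts.length - 1; i++) {
            final ObjMapper next = current.children.get(parts[i]);
            if (next == null) {
                return; // unknown intermediate object: dynamic mapping takes over instead
            }
            if (next.nested) {
                throw new IllegalArgumentException("Cannot add a value for field [" + fieldName
                        + "] since one of the intermediate objects is mapped as a nested object: [" + parts[i] + "]");
            }
            current = next;
        }
    }

    public static void main(String[] args) {
        ObjMapper bar = new ObjMapper(false, Collections.emptyMap());
        ObjMapper foo = new ObjMapper(true, Collections.singletonMap("bar", bar)); // "foo" is nested
        ObjMapper root = new ObjMapper(false, Collections.singletonMap("foo", foo));
        checkPath(root, "foo.bar"); // throws, like indexing {"foo.bar": 5} when "foo" is nested
    }
}
```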
{ "body": "Currently we have these lines:\n\n```\n//// System code permissions:\n//// These permissions apply to the JDK itself:\n\ngrant codeBase \"file:${{java.ext.dirs}}/*\" {\n permission java.security.AllPermission;\n};\n```\n\nBut this is not really offically correct, and a problem going forwards, see https://bugs.openjdk.java.net/browse/JDK-8040059 and related issues (https://bugs.openjdk.java.net/secure/IssueNavigator.jspa?reset=true&jqlQuery=labels+%3D+deprivilege)\n\nCurrent java 9 config is here: http://hg.openjdk.java.net/jdk9/jdk9/jdk/file/tip/src/java.base/share/conf/security/java.policy\n\nSo we must properly bring in the system policy, and just disable the bad defaults it has (with escape hatch in case there is some issue with that). This makes things better for users out of box.\n\nIt also makes us properly behaved, respecting user and system configuration. See https://docs.oracle.com/javase/8/docs/technotes/guides/security/PolicyFiles.html for more information.\n\nCloses #14690\n", "comments": [ { "body": "LGTM\n", "created_at": "2015-11-12T08:12:02Z" } ], "number": 14704, "title": "Handle system policy correctly" }
{ "body": "This commit refactors the handling of bad default permissions that come\r\nfrom the system security policy.\r\n\r\nRelates #14704", "number": 21735, "review_comments": [], "title": "Refactor handling for bad default permissions" }
{ "commits": [ { "message": "Refactor handling for bad default permissions\n\nThis commit refactors the handling of bad default permissions that come\nfrom the system security policy." } ], "files": [ { "diff": "@@ -21,17 +21,18 @@\n \n import org.elasticsearch.common.SuppressForbidden;\n \n-import java.net.SocketPermission;\n-import java.net.URL;\n import java.io.FilePermission;\n import java.io.IOException;\n+import java.net.SocketPermission;\n+import java.net.URL;\n import java.security.CodeSource;\n import java.security.Permission;\n import java.security.PermissionCollection;\n import java.security.Permissions;\n import java.security.Policy;\n import java.security.ProtectionDomain;\n import java.util.Map;\n+import java.util.function.Predicate;\n \n /** custom policy for union of static and dynamic permissions */\n final class ESPolicy extends Policy {\n@@ -133,18 +134,66 @@ public PermissionCollection getPermissions(CodeSource codesource) {\n \n // TODO: remove this hack when insecure defaults are removed from java\n \n+ /**\n+ * Wraps a bad default permission, applying a pre-implies to any permissions before checking if the wrapped bad default permission\n+ * implies a permission.\n+ */\n+ private static class BadDefaultPermission extends Permission {\n+\n+ private final Permission badDefaultPermission;\n+ private final Predicate<Permission> preImplies;\n+\n+ /**\n+ * Construct an instance with a pre-implies check to apply to desired permissions.\n+ *\n+ * @param badDefaultPermission the bad default permission to wrap\n+ * @param preImplies a test that is applied to a desired permission before checking if the bad default permission that\n+ * this instance wraps implies the desired permission\n+ */\n+ public BadDefaultPermission(final Permission badDefaultPermission, final Predicate<Permission> preImplies) {\n+ super(badDefaultPermission.getName());\n+ this.badDefaultPermission = badDefaultPermission;\n+ this.preImplies = preImplies;\n+ }\n+\n+ @Override\n+ public final boolean implies(Permission permission) {\n+ return preImplies.test(permission) && badDefaultPermission.implies(permission);\n+ }\n+\n+ @Override\n+ public final boolean equals(Object obj) {\n+ return badDefaultPermission.equals(obj);\n+ }\n+\n+ @Override\n+ public int hashCode() {\n+ return badDefaultPermission.hashCode();\n+ }\n+\n+ @Override\n+ public String getActions() {\n+ return badDefaultPermission.getActions();\n+ }\n+\n+ }\n+\n // default policy file states:\n // \"It is strongly recommended that you either remove this permission\n // from this policy file or further restrict it to code sources\n // that you specify, because Thread.stop() is potentially unsafe.\"\n // not even sure this method still works...\n- static final Permission BAD_DEFAULT_NUMBER_ONE = new RuntimePermission(\"stopThread\");\n+ private static final Permission BAD_DEFAULT_NUMBER_ONE = new BadDefaultPermission(new RuntimePermission(\"stopThread\"), p -> true);\n \n // default policy file states:\n // \"allows anyone to listen on dynamic ports\"\n // specified exactly because that is what we want, and fastest since it won't imply any\n // expensive checks for the implicit \"resolve\"\n- static final Permission BAD_DEFAULT_NUMBER_TWO = new SocketPermission(\"localhost:0\", \"listen\");\n+ private static final Permission BAD_DEFAULT_NUMBER_TWO =\n+ new BadDefaultPermission(\n+ new SocketPermission(\"localhost:0\", \"listen\"),\n+ // we apply this pre-implies test because some SocketPermission#implies calls do expensive reverse-DNS resolves\n+ 
p -> p instanceof SocketPermission && p.getActions().contains(\"listen\"));\n \n /**\n * Wraps the Java system policy, filtering out bad default permissions that\n@@ -159,7 +208,7 @@ static class SystemPolicy extends Policy {\n \n @Override\n public boolean implies(ProtectionDomain domain, Permission permission) {\n- if (BAD_DEFAULT_NUMBER_ONE.equals(permission) || BAD_DEFAULT_NUMBER_TWO.equals(permission)) {\n+ if (BAD_DEFAULT_NUMBER_ONE.implies(permission) || BAD_DEFAULT_NUMBER_TWO.implies(permission)) {\n return false;\n }\n return delegate.implies(domain, permission);", "filename": "core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java", "status": "modified" }, { "diff": "@@ -22,6 +22,7 @@\n import org.elasticsearch.test.ESTestCase;\n \n import java.io.FilePermission;\n+import java.net.SocketPermission;\n import java.security.AllPermission;\n import java.security.CodeSource;\n import java.security.Permission;\n@@ -36,7 +37,7 @@\n * we don't allow messing with the policy\n */\n public class ESPolicyUnitTests extends ESTestCase {\n- /** \n+ /**\n * Test policy with null codesource.\n * <p>\n * This can happen when restricting privileges with doPrivileged,\n@@ -55,7 +56,7 @@ public void testNullCodeSource() throws Exception {\n assertFalse(policy.implies(new ProtectionDomain(null, noPermissions), new FilePermission(\"foo\", \"read\")));\n }\n \n- /** \n+ /**\n * test with null location\n * <p>\n * its unclear when/if this happens, see https://bugs.openjdk.java.net/browse/JDK-8129972\n@@ -67,4 +68,15 @@ public void testNullLocation() throws Exception {\n assertFalse(policy.implies(new ProtectionDomain(new CodeSource(null, (Certificate[]) null), noPermissions),\n new FilePermission(\"foo\", \"read\")));\n }\n+\n+ public void testListen() {\n+ assumeTrue(\"test cannot run with security manager\", System.getSecurityManager() == null);\n+ final PermissionCollection noPermissions = new Permissions();\n+ final ESPolicy policy = new ESPolicy(noPermissions, Collections.emptyMap(), true);\n+ assertFalse(\n+ policy.implies(\n+ new ProtectionDomain(ESPolicyUnitTests.class.getProtectionDomain().getCodeSource(), noPermissions),\n+ new SocketPermission(\"localhost:\" + randomFrom(0, randomIntBetween(49152, 65535)), \"listen\")));\n+ }\n+\n }", "filename": "qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/ESPolicyUnitTests.java", "status": "modified" } ] }
{ "body": "The `ignore_unavailable` request setting shouldn't ignore closed indices if a single index is specified in a search or broadcast request.\n\nPR for #7153\n", "comments": [ { "body": "I left a couple of minor comments @martijnvg . The one thing I am not sure about is that the PR addresses the case of closed indices, while the related issue might be referring to more usecases around the single index special case, like:\n\n```\ncurl -XGET localhost:9200/index1/_search?ignore_unavailable=true\n\n# fails if index1 is not there, although we said ignore_unavailable=true\n{\n \"error\" : \"IndexMissingException[[index1] missing]\",\n \"status\" : 404\n}\n```\n\nIs this something that we want to address as well?\n", "created_at": "2014-12-23T16:42:53Z" }, { "body": "@javanna I updated the PR, addressed the minor comments and added tests for the case a single index mentioned doesn't exist.\n", "created_at": "2014-12-23T17:34:58Z" }, { "body": "Thanks @mvg LGTM, the single missing index case I mentioned before was something I overlooked on my end, it works as expected already, so this PR addresses what it should addresses, sorry for the confusion ;)\n", "created_at": "2014-12-23T17:36:01Z" }, { "body": "This contradicts http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/multi-index.html\nI see no rationale for special handling of the single non-wildcard index case.\n", "created_at": "2015-02-12T09:58:07Z" } ], "number": 9047, "title": "`ignore_unavailable` shouldn't ignore closed indices" }
{ "body": "This is a followup to #21689 where we removed a misplaced try catch for IndexMissingException and IndexClosedException which was related to #9047 (at least for the index closed case). The code block within the change was moved as part of #20890, which made the catch redundant. It was somehow used before (e.g. in 5.0) but it doesn't seem that this catch had any effect. Added tests to verify that. In fact a specific catch added to the search api only would defeat the purpose of having common indices options that work throughout all our APIs.\r\n\r\nRelates to #21689", "number": 21701, "review_comments": [ { "body": "missing_index -> index_closed", "created_at": "2016-11-21T13:36:55Z" }, { "body": "argh, right I will fix thanks for catching this", "created_at": "2016-11-21T13:44:51Z" }, { "body": "fixed ;)", "created_at": "2016-11-21T13:48:08Z" } ], "title": "Add indices options tests to search api REST tests" }
{ "commits": [ { "message": "Add indices options tests to search api REST tests\n\nThis is a followup to #21689 where we removed a misplaced try catch for IndexMissingException and IndexClosedException which was related to #9047 (at least for the index closed case). The code block within the change was moved as part of #20890, which made the catch redundant. It was somehow used before (e.g. in 5.0) but it doesn't seem that this catch had any effect. Added tests to verify that. In fact a specific catch added to the search api only would defeat the purpose of having common indices options that work throughout all our APIs.\n\nRelates to #21689" } ], "files": [ { "diff": "@@ -0,0 +1,63 @@\n+---\n+\"Missing index date math with catch\":\n+\n+ - do:\n+ catch: /logstash-\\d{4}\\.\\d{2}\\.\\d{2}/\n+ search:\n+ index: <logstash-{now/M}>\n+\n+---\n+\"Missing index\":\n+\n+ - do:\n+ catch: missing\n+ search:\n+ index: missing_index\n+\n+ - do:\n+ search:\n+ index: missing_index\n+ ignore_unavailable: true\n+\n+ - match: {hits.total: 0}\n+\n+---\n+\"Closed index\":\n+\n+ - do:\n+ indices.create:\n+ index: index_closed\n+\n+ - do:\n+ indices.close:\n+ index: index_closed\n+\n+ - do:\n+ catch: /index_closed_exception/\n+ search:\n+ index: index_closed\n+\n+ - do:\n+ search:\n+ index: index_closed\n+ ignore_unavailable: true\n+\n+ - match: {hits.total: 0}\n+\n+ - do:\n+ search:\n+ index: index*\n+\n+ - match: {hits.total: 0}\n+\n+ - do:\n+ catch: missing\n+ search:\n+ index: index*\n+ allow_no_indices: false\n+\n+ - do:\n+ catch: /index_closed_exception/\n+ search:\n+ index: index*\n+ expand_wildcards: [\"open\",\"closed\"]", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/search/80_indices_options.yaml", "status": "added" } ] }
{ "body": "PR #19416 added a safety mechanism to shard state fetching to only access the store when the shard lock can be acquired. This can lead to the following situation however where a shard has not fully shut down yet while the shard fetching is going on, resulting in a `ShardLockObtainFailedException`. `PrimaryShardAllocator` that decides where to allocate primary shards sees this exception and treats the shard as unusable. If this is the only shard copy in the cluster, the cluster stays red and a new shard fetching cycle will not be triggered as shard state fetching treats exceptions while opening the store as permanent failures.\r\n\r\nThis PR makes it so that `PrimaryShardAllocator` treats the locked shard as a possible allocation target (although with the least priority).", "comments": [ { "body": "I wanted to make sure I understood correctly - if the shard that has the lock exception is the only valid copy, so the allocator decides to allocate the primary to this (currently) locked shard. When the node receives the cluster state update that it must allocate the primary on itself, it will try to obtain the shard lock for 5 secs. If it fails to obtain the lock within 5 secs, the failure is sent to master, which will try to reallocate to the same node again. It will do this for up to 5 tries (by default) due to the MaxRetryAllocationDecider. So the node must release the lock on the shard within 5 tries, each try attempting for 5 secs. Is this understanding correct?\n", "created_at": "2016-11-18T18:09:27Z" }, { "body": "@abeyad correct (I tried it to confirm). We will have 5 iterations where 5 seconds are taken to obtain the shard lock while shard fetching and then 5 seconds to obtain the shard lock while trying to allocate the shard on the node, so 5 \\* 5 seconds for shard fetching + 4 \\* 5 seconds for shard allocation attempts = 45 seconds :-)\n\nTest code\n\n```\n prepareCreate(\"test\").setSettings(Settings.builder()\n .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)\n .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)).get();\n ensureGreen(\"test\");\n\n ClusterState state = client().admin().cluster().prepareState().get().getState();\n ShardRouting shardRouting = state.routingTable().shardRoutingTable(\"test\", 0).primaryShard();\n String nodeWithPrimary = shardRouting.currentNodeId();\n String node = state.nodes().get(nodeWithPrimary).getName();\n ShardId shardId = shardRouting.shardId();\n\n NodeEnvironment environment = internalCluster().getInstance(Node.class, node).getNodeEnvironment();\n IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node);\n indicesService.getShardOrNull(shardId).failShard(\"because I can\", new RuntimeException(\"because I can\"));\n\n ShardLock shardLock = environment.shardLock(shardId, TimeValue.timeValueSeconds(5).millis());\n\n assertBusy(() -> {\n assertTrue(client().admin().cluster().prepareHealth(\"test\").get().getStatus() == ClusterHealthStatus.RED);\n }, 1, TimeUnit.MINUTES);\n\n ensureGreen(TimeValue.timeValueMinutes(3), \"test\");\n shardLock.close();\n```\n\nOutput:\n\n```\n/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/bin/java -ea -Didea.launcher.port=7532 \"-Didea.launcher.bin.path=/Applications/IntelliJ IDEA.app/Contents/bin\" -Didea.junit.sm_runner -Dfile.encoding=UTF-8 -classpath \"/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar:/Applications/IntelliJ 
IDEA.app/Contents/plugins/junit/lib/junit-rt.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/lib/dt.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/lib/javafx-mx.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/lib/jconsole.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/lib/packager.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/lib/sa-jdi.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/lib/tools.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/charsets.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/deploy.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/javaws.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/jce.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/jfr.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/jfxswt.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/jsse.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/management-agent.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/plugin.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/resources.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/rt.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/ext/cldrdata.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/ext/dnsns.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/ext/jaccess.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/ext/jfxrt.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/ext/localedata.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/ext/nashorn.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/ext/sunec.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/ext/sunjce_provider.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/ext/sunpkcs11.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home/jre/lib/ext/zipfs.jar:/Users/ywelsch/dev/elasticsearch/core/build-idea/classes/test:/Users/ywelsch/dev/elasticsearch/core/build-idea/classes/main:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-core/6.3.0/d3c87ea89e2f83e401f9cc7f14e4c43945f7f1e1/lucene-core-6.3.0.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-analyzers-common/6.3.0/494aed699af238c3872a6b65e17939e9cb7ddbe0/lucene-analyzers-common-6.3.0.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-backward-codecs/6.3.0/77dede7dff1b833ca2e92d8ab137edb209354d9b/lucene-backward-codecs-6.3.0.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-grouping/6.3.0/2c96d59e318ea66838aeb9c5cfb8b4d27b40953c/lucene-grouping-6.3.0.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-highlighter/6.3.0/4f154d8badfe47fe45503c18fb30f2177f758794/lucene-highlighter-6.3.0.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-join/6.3.0/79b898117dcfde2981ec6806e420ff218842eca8/lucene-join-6.3.0.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-memory/6.3.0/89edeb404e507d640cb13903acff6953199704a2/lucene-memory
-6.3.0.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-misc/6.3.0/2d0e1f5a9df15ac911ad495bad5ea253ab50a9f/lucene-misc-6.3.0.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-queries/6.3.0/eb7938233c8103223069c7b5b5f785b4d20ddafa/lucene-queries-6.3.0.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-queryparser/6.3.0/e979fb02155cbe81a8d335d6dc41d2ef06be68b6/lucene-queryparser-6.3.0.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-sandbox/6.3.0/257387c45c6fa2b77fd6931751f93fdcd798ced4/lucene-sandbox-6.3.0.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-spatial/6.3.0/3cf5fe5402b5e34b240b73501c9e97a82428259e/lucene-spatial-6.3.0.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-spatial-extras/6.3.0/1b77ef3740dc885c62d5966fbe9aea1199d344fb/lucene-spatial-extras-6.3.0.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-spatial3d/6.3.0/aa94b4a8636b3633008640cc5155ad354aebcea5/lucene-spatial3d-6.3.0.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-suggest/6.3.0/ed5d8ee5cd7edcad5d4ffca2b4540ccc844e9bb0/lucene-suggest-6.3.0.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.elasticsearch/securesm/1.1/1e423447d020041534be94c0f31a49fbdc1f2950/securesm-1.1.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/net.sf.jopt-simple/jopt-simple/5.0.2/98cafc6081d5632b61be2c9e60650b64ddbc637c/jopt-simple-5.0.2.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/com.carrotsearch/hppc/0.7.1/8b5057f74ea378c0150a1860874a3ebdcb713767/hppc-0.7.1.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/joda-time/joda-time/2.9.5/5f01da7306363fad2028b916f3eab926262de928/joda-time-2.9.5.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.yaml/snakeyaml/1.15/3b132bea69e8ee099f416044970997bde80f4ea6/snakeyaml-1.15.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/com.fasterxml.jackson.core/jackson-core/2.8.1/fd13b1c033741d48291315c6370f7d475a42dccf/jackson-core-2.8.1.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/com.fasterxml.jackson.dataformat/jackson-dataformat-smile/2.8.1/5b73867bc12224946fc67fc8d49d9f5e698d7f/jackson-dataformat-smile-2.8.1.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/com.fasterxml.jackson.dataformat/jackson-dataformat-yaml/2.8.1/eb63166c723b0b4b9fb5298fca232a2f6612ec34/jackson-dataformat-yaml-2.8.1.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/com.fasterxml.jackson.dataformat/jackson-dataformat-cbor/2.8.1/3a6fb7e75c9972559a78cf5cfc5a48a41a13ea40/jackson-dataformat-cbor-2.8.1.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/com.tdunning/t-digest/3.0/84ccf145ac2215e6bfa63baa3101c0af41017cfc/t-digest-3.0.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.hdrhistogram/HdrHistogram/2.1.6/7495feb7f71ee124bd2a7e7d83590e296d71d80e/HdrHistogram-2.1.6.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.locationtech.spatial4j/spatial4j/0.6/21b15310bddcfd8c72611c180f20cf23279809a3/spatial4j-0.6.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/com.vividsolutions/jts/1.13/3ccfb9b60f04d71add996a666ceb8902904fd805/jts-1.13.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.logging.log4j/log4j-api/2.7/8de00e382a817981b737be84cb8def687d392963/log4j-api-2.7.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.logging.log4j/log4j-core/2.7/a3f2b4e64c61a7fc1ed8f1e5ba371933404ed98a/log4j-core-2.7.jar:/Us
ers/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.logging.log4j/log4j-1.2-api/2.7/39f4e6c2d68d4ef8fd4b0883d165682dedd5be52/log4j-1.2-api-2.7.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/net.java.dev.jna/jna/4.2.2/5012450aee579c3118ff09461d5ce210e0cdc2a9/jna-4.2.2.jar:/Users/ywelsch/dev/elasticsearch/test/framework/build-idea/classes/main:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/com.carrotsearch.randomizedtesting/randomizedtesting-runner/2.4.0/222eb23dd6f45541acf6a5ac69cd9e9bdce25d2/randomizedtesting-runner-2.4.0.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/junit/junit/4.11/4e031bb61df09069aeb2bffb4019e7a5034a4ee0/junit-4.11.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.hamcrest/hamcrest-all/1.3/63a21ebc981131004ad02e0434e799fd7f3a8d5a/hamcrest-all-1.3.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-test-framework/6.3.0/a6ad70bafbabbc82830f7e0b1d6ac1f4d74831d7/lucene-test-framework-6.3.0.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-codecs/6.3.0/8e58e160a4751200987e60a365f4370d88fd9942/lucene-codecs-6.3.0.jar:/Users/ywelsch/dev/elasticsearch/client/rest/build-idea/classes/main:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.httpcomponents/httpclient/4.5.2/733db77aa8d9b2d68015189df76ab06304406e50/httpclient-4.5.2.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.httpcomponents/httpcore/4.4.5/e7501a1b34325abb00d17dde96150604a0658b54/httpcore-4.4.5.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/commons-logging/commons-logging/1.1.3/f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f/commons-logging-1.1.3.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/commons-codec/commons-codec/1.10/4b95f4897fa13f2cd904aee711aeafc0c5295cd8/commons-codec-1.10.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.elasticsearch/securemock/1.2/98201d4ad5ac93f6b415ae9172d52b5e7cda490e/securemock-1.2.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.httpcomponents/httpasyncclient/4.1.2/95aa3e6fb520191a0970a73cf09f62948ee614be/httpasyncclient-4.1.2.jar:/Users/ywelsch/.gradle/caches/modules-2/files-2.1/org.apache.httpcomponents/httpcore-nio/4.4.5/f4be009e7505f6ceddf21e7960c759f413f15056/httpcore-nio-4.4.5.jar\" com.intellij.rt.execution.application.AppMain com.intellij.rt.execution.junit.JUnitStarter -ideVersion5 org.elasticsearch.action.admin.indices.create.CreateIndexIT,testWeirdScenario\n[2016-11-18T20:30:44,217][WARN ][o.e.b.JNANatives ] Unable to lock JVM Memory: error=78, reason=Function not implemented\n[2016-11-18T20:30:44,223][WARN ][o.e.b.JNANatives ] This can result in part of the JVM being swapped out.\n[2016-11-18T21:30:45,827][INFO ][o.e.a.a.i.c.CreateIndexIT] [CreateIndexIT#testWeirdScenario]: setup test\n[2016-11-18T21:30:45,847][INFO ][o.e.t.InternalTestCluster] Setup InternalTestCluster [TEST-CHILD_VM=[0]-CLUSTER_SEED=[167703776559476503]-HASH=[2E357FE8EAD77]-cluster] with seed [253CDB23D51AF17] using [0] dedicated masters, [2] (data) nodes and [1] coord only nodes\n[2016-11-18T21:30:46,390][INFO ][o.e.n.Node ] [node_t0] initializing ...\n[2016-11-18T21:30:46,488][INFO ][o.e.e.NodeEnvironment ] [node_t0] using [1] data paths, mounts [[/ (/dev/disk1)]], net usable_space [244.6gb], net total_space [464.7gb], spins? 
[unknown], types [hfs]\n[2016-11-18T21:30:46,489][INFO ][o.e.e.NodeEnvironment ] [node_t0] heap size [3.5gb], compressed ordinary object pointers [true]\n[2016-11-18T21:30:46,491][INFO ][o.e.n.Node ] [node_t0] version[6.0.0-alpha1-SNAPSHOT], pid[40959], build[Unknown/Unknown], OS[Mac OS X/10.12.1/x86_64], JVM[Oracle Corporation/Java HotSpot(TM) 64-Bit Server VM/1.8.0_60/25.60-b23]\n[2016-11-18T21:30:46,492][WARN ][o.e.n.Node ] [node_t0] version [6.0.0-alpha1-SNAPSHOT] is a pre-release version of Elasticsearch and is not suitable for production\n[2016-11-18T21:30:46,503][INFO ][o.e.p.PluginsService ] [node_t0] no modules loaded\n[2016-11-18T21:30:46,504][INFO ][o.e.p.PluginsService ] [node_t0] loaded plugin [org.elasticsearch.test.ESIntegTestCase$TestSeedPlugin]\n[2016-11-18T21:30:46,504][INFO ][o.e.p.PluginsService ] [node_t0] loaded plugin [org.elasticsearch.test.discovery.TestZenDiscovery$TestPlugin]\n[2016-11-18T21:30:46,504][INFO ][o.e.p.PluginsService ] [node_t0] loaded plugin [org.elasticsearch.transport.MockTcpTransportPlugin]\n[2016-11-18T21:30:49,670][INFO ][o.e.n.Node ] [node_t0] initialized\n[2016-11-18T21:30:49,677][INFO ][o.e.n.Node ] [node_t1] initializing ...\n[2016-11-18T21:30:49,681][INFO ][o.e.e.NodeEnvironment ] [node_t1] using [1] data paths, mounts [[/ (/dev/disk1)]], net usable_space [244.6gb], net total_space [464.7gb], spins? [unknown], types [hfs]\n[2016-11-18T21:30:49,681][INFO ][o.e.e.NodeEnvironment ] [node_t1] heap size [3.5gb], compressed ordinary object pointers [true]\n[2016-11-18T21:30:49,681][INFO ][o.e.n.Node ] [node_t1] version[6.0.0-alpha1-SNAPSHOT], pid[40959], build[Unknown/Unknown], OS[Mac OS X/10.12.1/x86_64], JVM[Oracle Corporation/Java HotSpot(TM) 64-Bit Server VM/1.8.0_60/25.60-b23]\n[2016-11-18T21:30:49,681][WARN ][o.e.n.Node ] [node_t1] version [6.0.0-alpha1-SNAPSHOT] is a pre-release version of Elasticsearch and is not suitable for production\n[2016-11-18T21:30:49,682][INFO ][o.e.p.PluginsService ] [node_t1] no modules loaded\n[2016-11-18T21:30:49,682][INFO ][o.e.p.PluginsService ] [node_t1] loaded plugin [org.elasticsearch.test.ESIntegTestCase$TestSeedPlugin]\n[2016-11-18T21:30:49,682][INFO ][o.e.p.PluginsService ] [node_t1] loaded plugin [org.elasticsearch.test.discovery.TestZenDiscovery$TestPlugin]\n[2016-11-18T21:30:49,682][INFO ][o.e.p.PluginsService ] [node_t1] loaded plugin [org.elasticsearch.transport.MockTcpTransportPlugin]\n[2016-11-18T21:30:49,765][INFO ][o.e.n.Node ] [node_t1] initialized\n[2016-11-18T21:30:49,769][INFO ][o.e.n.Node ] [node_tc2] initializing ...\n[2016-11-18T21:30:49,777][INFO ][o.e.e.NodeEnvironment ] [node_tc2] using [1] data paths, mounts [[/ (/dev/disk1)]], net usable_space [244.6gb], net total_space [464.7gb], spins? 
[unknown], types [hfs]\n[2016-11-18T21:30:49,777][INFO ][o.e.e.NodeEnvironment ] [node_tc2] heap size [3.5gb], compressed ordinary object pointers [true]\n[2016-11-18T21:30:49,779][INFO ][o.e.n.Node ] [node_tc2] version[6.0.0-alpha1-SNAPSHOT], pid[40959], build[Unknown/Unknown], OS[Mac OS X/10.12.1/x86_64], JVM[Oracle Corporation/Java HotSpot(TM) 64-Bit Server VM/1.8.0_60/25.60-b23]\n[2016-11-18T21:30:49,779][WARN ][o.e.n.Node ] [node_tc2] version [6.0.0-alpha1-SNAPSHOT] is a pre-release version of Elasticsearch and is not suitable for production\n[2016-11-18T21:30:49,780][INFO ][o.e.p.PluginsService ] [node_tc2] no modules loaded\n[2016-11-18T21:30:49,780][INFO ][o.e.p.PluginsService ] [node_tc2] loaded plugin [org.elasticsearch.test.ESIntegTestCase$TestSeedPlugin]\n[2016-11-18T21:30:49,781][INFO ][o.e.p.PluginsService ] [node_tc2] loaded plugin [org.elasticsearch.test.discovery.TestZenDiscovery$TestPlugin]\n[2016-11-18T21:30:49,781][INFO ][o.e.p.PluginsService ] [node_tc2] loaded plugin [org.elasticsearch.transport.MockTcpTransportPlugin]\n[2016-11-18T21:30:49,880][INFO ][o.e.n.Node ] [node_tc2] initialized\n[2016-11-18T21:30:49,898][INFO ][o.e.n.Node ] [node_t0] starting ...\n[2016-11-18T21:30:49,966][INFO ][o.e.t.TransportService ] [node_t0] publish_address {127.0.0.1:9400}, bound_addresses {[fe80::1]:9400}, {[::1]:9400}, {127.0.0.1:9400}\n[2016-11-18T21:30:50,055][INFO ][o.e.n.Node ] [node_t0] started\n[2016-11-18T21:30:50,055][INFO ][o.e.n.Node ] [node_t1] starting ...\n[2016-11-18T21:30:50,066][INFO ][o.e.t.d.MockZenPing ] [node_t0] pinging using mock zen ping\n[2016-11-18T21:30:50,074][INFO ][o.e.t.d.MockZenPing ] [node_t0] pinging using mock zen ping\n[2016-11-18T21:30:50,088][INFO ][o.e.t.TransportService ] [node_t1] publish_address {127.0.0.1:9401}, bound_addresses {[fe80::1]:9401}, {[::1]:9401}, {127.0.0.1:9401}\n[2016-11-18T21:30:50,104][INFO ][o.e.n.Node ] [node_t1] started\n[2016-11-18T21:30:50,112][INFO ][o.e.t.d.MockZenPing ] [node_t1] pinging using mock zen ping\n[2016-11-18T21:30:50,114][INFO ][o.e.n.Node ] [node_tc2] starting ...\n[2016-11-18T21:30:50,186][INFO ][o.e.t.TransportService ] [node_tc2] publish_address {127.0.0.1:9402}, bound_addresses {[fe80::1]:9402}, {[::1]:9402}, {127.0.0.1:9402}\n[2016-11-18T21:30:50,191][INFO ][o.e.t.d.MockZenPing ] [node_tc2] pinging using mock zen ping\n[2016-11-18T21:30:50,243][INFO ][o.e.c.s.ClusterService ] [node_t0] new_master {node_t0}{Smft58veRkeHiF6nXwjwTg}{RLXdAPCgQm2IV1bb4I7Ndw}{127.0.0.1}{127.0.0.1:9400}, added {{node_t1}{k-ggqJ2JRi-ql8PwqzzPBg}{xIGyXW7dSEWuXsHS1f8pbA}{127.0.0.1}{127.0.0.1:9401},}, reason: zen-disco-elected-as-master ([1] nodes joined)[{node_t1}{k-ggqJ2JRi-ql8PwqzzPBg}{xIGyXW7dSEWuXsHS1f8pbA}{127.0.0.1}{127.0.0.1:9401}]\n[2016-11-18T21:30:50,268][INFO ][o.e.c.s.ClusterService ] [node_t1] detected_master {node_t0}{Smft58veRkeHiF6nXwjwTg}{RLXdAPCgQm2IV1bb4I7Ndw}{127.0.0.1}{127.0.0.1:9400}, added {{node_t0}{Smft58veRkeHiF6nXwjwTg}{RLXdAPCgQm2IV1bb4I7Ndw}{127.0.0.1}{127.0.0.1:9400},}, reason: zen-disco-receive(from master [master {node_t0}{Smft58veRkeHiF6nXwjwTg}{RLXdAPCgQm2IV1bb4I7Ndw}{127.0.0.1}{127.0.0.1:9400} committed version [1]])\n[2016-11-18T21:30:50,282][INFO ][o.e.c.s.ClusterService ] [node_t0] added {{node_tc2}{gN9UaHrxRpS0T2AJ8hJuGQ}{wVYUDMB0TtCoR3gWIdWXcA}{127.0.0.1}{127.0.0.1:9402},}, reason: zen-disco-node-join[{node_tc2}{gN9UaHrxRpS0T2AJ8hJuGQ}{wVYUDMB0TtCoR3gWIdWXcA}{127.0.0.1}{127.0.0.1:9402}]\n[2016-11-18T21:30:50,328][INFO ][o.e.c.s.ClusterService ] [node_t1] added 
{{node_tc2}{gN9UaHrxRpS0T2AJ8hJuGQ}{wVYUDMB0TtCoR3gWIdWXcA}{127.0.0.1}{127.0.0.1:9402},}, reason: zen-disco-receive(from master [master {node_t0}{Smft58veRkeHiF6nXwjwTg}{RLXdAPCgQm2IV1bb4I7Ndw}{127.0.0.1}{127.0.0.1:9400} committed version [2]])\n[2016-11-18T21:30:50,328][INFO ][o.e.c.s.ClusterService ] [node_tc2] detected_master {node_t0}{Smft58veRkeHiF6nXwjwTg}{RLXdAPCgQm2IV1bb4I7Ndw}{127.0.0.1}{127.0.0.1:9400}, added {{node_t1}{k-ggqJ2JRi-ql8PwqzzPBg}{xIGyXW7dSEWuXsHS1f8pbA}{127.0.0.1}{127.0.0.1:9401},{node_t0}{Smft58veRkeHiF6nXwjwTg}{RLXdAPCgQm2IV1bb4I7Ndw}{127.0.0.1}{127.0.0.1:9400},}, reason: zen-disco-receive(from master [master {node_t0}{Smft58veRkeHiF6nXwjwTg}{RLXdAPCgQm2IV1bb4I7Ndw}{127.0.0.1}{127.0.0.1:9400} committed version [2]])\n[2016-11-18T21:30:50,342][INFO ][o.e.n.Node ] [node_tc2] started\n[2016-11-18T21:30:50,362][INFO ][o.e.g.GatewayService ] [node_t0] recovered [0] indices into cluster_state\n[2016-11-18T21:30:50,361][INFO ][o.e.p.PluginsService ] [transport_client_node_t1] no modules loaded\n[2016-11-18T21:30:50,362][INFO ][o.e.p.PluginsService ] [transport_client_node_t1] loaded plugin [org.elasticsearch.transport.MockTcpTransportPlugin]\n[2016-11-18T21:30:50,502][INFO ][o.e.a.a.i.c.CreateIndexIT] test using _default_ mappings: [{\"_default_\":{}}]\n[2016-11-18T21:30:50,823][INFO ][o.e.a.a.i.c.CreateIndexIT] [CreateIndexIT#testWeirdScenario]: starting test\n[2016-11-18T21:30:50,824][INFO ][o.e.p.PluginsService ] [transport_client_node_t0] no modules loaded\n[2016-11-18T21:30:50,824][INFO ][o.e.p.PluginsService ] [transport_client_node_t0] loaded plugin [org.elasticsearch.transport.MockTcpTransportPlugin]\n[2016-11-18T21:30:50,923][INFO ][o.e.c.m.MetaDataCreateIndexService] [node_t0] [test] creating index, cause [api], templates [random_index_template], shards [1]/[0], mappings [_default_]\n[2016-11-18T21:30:51,215][INFO ][o.e.c.r.a.AllocationService] [node_t0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[test][0]] ...]).\n[2016-11-18T21:30:51,239][INFO ][o.e.p.PluginsService ] [transport_client_node_tc2] no modules loaded\n[2016-11-18T21:30:51,239][INFO ][o.e.p.PluginsService ] [transport_client_node_tc2] loaded plugin [org.elasticsearch.transport.MockTcpTransportPlugin]\n[2016-11-18T21:30:51,279][WARN ][o.e.i.e.Engine ] [node_t0] [test][0] failed engine [because I can]\njava.lang.RuntimeException: because I can\n at org.elasticsearch.action.admin.indices.create.CreateIndexIT.testWeirdScenario(CreateIndexIT.java:90) ~[test/:?]\n at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]\n at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:?]\n at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]\n at java.lang.reflect.Method.invoke(Method.java:497) ~[?:1.8.0_60]\n at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1713) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:907) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:943) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:957) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:367) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:811) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:462) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:916) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:802) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:852) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) ~[randomizedtesting-runner-2.4.0.jar:?]\n at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:367) ~[randomizedtesting-runner-2.4.0.jar:?]\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\n[2016-11-18T21:30:51,295][WARN ][o.e.i.c.IndicesClusterStateService] [node_t0] [[test][0]] marking and sending shard failed due to [shard failure, reason [because I can]]\njava.lang.RuntimeException: because I can\n at org.elasticsearch.action.admin.indices.create.CreateIndexIT.testWeirdScenario(CreateIndexIT.java:90) ~[test/:?]\n at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]\n at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:?]\n at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]\n at java.lang.reflect.Method.invoke(Method.java:497) ~[?:1.8.0_60]\n at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1713) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:907) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:943) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:957) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 
- shalin - 2016-11-02 19:47:12]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:367) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:811) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:462) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:916) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:802) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:852) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) ~[randomizedtesting-runner-2.4.0.jar:?]\n at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:367) ~[randomizedtesting-runner-2.4.0.jar:?]\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\n[2016-11-18T21:30:51,303][WARN ][o.e.c.a.s.ShardStateAction] [node_t0] [test][0] received shard failed for shard id [[test][0]], allocation id [gyqfvt5TQ2iaA5T66PDmpw], primary term [0], message [shard failure, reason [because I can]], failure [RuntimeException[because I can]]\njava.lang.RuntimeException: because I can\n at org.elasticsearch.action.admin.indices.create.CreateIndexIT.testWeirdScenario(CreateIndexIT.java:90) ~[test/:?]\n at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]\n at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:?]\n at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]\n at java.lang.reflect.Method.invoke(Method.java:497) ~[?:1.8.0_60]\n at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1713) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:907) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:943) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:957) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:367) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:811) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:462) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:916) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:802) ~[randomizedtesting-runner-2.4.0.jar:?]\n at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:852) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) ~[randomizedtesting-runner-2.4.0.jar:?]\n at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) ~[lucene-test-framework-6.3.0.jar:6.3.0 a66a44513ee8191e25b477372094bfa846450316 - shalin - 2016-11-02 19:47:12]\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) ~[randomizedtesting-runner-2.4.0.jar:?]\n at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:367) ~[randomizedtesting-runner-2.4.0.jar:?]\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\n[2016-11-18T21:30:51,342][INFO ][o.e.c.r.a.AllocationService] [node_t0] Cluster health status changed from [GREEN] to [RED] (reason: [shards failed [[test][0]] ...]).\n[2016-11-18T21:30:56,359][ERROR][o.e.g.TransportNodesListGatewayStartedShards] [node_t0] [test][0] unable to acquire shard lock\norg.elasticsearch.env.ShardLockObtainFailedException: [test][0]: obtaining shard lock timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:685) ~[main/:?]\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:604) ~[main/:?]\n at org.elasticsearch.index.store.Store.tryOpenIndex(Store.java:418) ~[main/:?]\n at 
org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.nodeOperation(TransportNodesListGatewayStartedShards.java:144) ~[main/:?]\n at org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.nodeOperation(TransportNodesListGatewayStartedShards.java:61) ~[main/:?]\n at org.elasticsearch.action.support.nodes.TransportNodesAction.nodeOperation(TransportNodesAction.java:145) ~[main/:?]\n at org.elasticsearch.action.support.nodes.TransportNodesAction$NodeTransportHandler.messageReceived(TransportNodesAction.java:270) ~[main/:?]\n at org.elasticsearch.action.support.nodes.TransportNodesAction$NodeTransportHandler.messageReceived(TransportNodesAction.java:266) ~[main/:?]\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:66) ~[main/:?]\n at org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:569) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:527) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[main/:?]\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_60]\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_60]\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\n[2016-11-18T21:31:01,381][WARN ][o.e.i.c.IndicesClusterStateService] [node_t0] [[test][0]] marking and sending shard failed due to [failed to create shard]\njava.io.IOException: failed to obtain in-memory shard lock\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:369) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.createShard(IndicesService.java:513) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.createShard(IndicesService.java:147) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.createShard(IndicesClusterStateService.java:539) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.createOrUpdateShards(IndicesClusterStateService.java:516) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:205) ~[main/:?]\n at org.elasticsearch.cluster.service.ClusterService.runTasksForExecutor(ClusterService.java:780) ~[main/:?]\n at org.elasticsearch.cluster.service.ClusterService$UpdateTask.run(ClusterService.java:965) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:458) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:238) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:201) ~[main/:?]\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_60]\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_60]\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\nCaused by: org.elasticsearch.env.ShardLockObtainFailedException: [test][0]: obtaining shard lock timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:685) ~[main/:?]\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:604) ~[main/:?]\n at 
org.elasticsearch.index.IndexService.createShard(IndexService.java:298) ~[main/:?]\n ... 13 more\n[2016-11-18T21:31:01,384][WARN ][o.e.c.a.s.ShardStateAction] [node_t0] [test][0] received shard failed for shard id [[test][0]], allocation id [gyqfvt5TQ2iaA5T66PDmpw], primary term [0], message [failed to create shard], failure [IOException[failed to obtain in-memory shard lock]; nested: ShardLockObtainFailedException[[test][0]: obtaining shard lock timed out after 5000ms]; ]\njava.io.IOException: failed to obtain in-memory shard lock\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:369) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.createShard(IndicesService.java:513) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.createShard(IndicesService.java:147) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.createShard(IndicesClusterStateService.java:539) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.createOrUpdateShards(IndicesClusterStateService.java:516) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:205) ~[main/:?]\n at org.elasticsearch.cluster.service.ClusterService.runTasksForExecutor(ClusterService.java:780) ~[main/:?]\n at org.elasticsearch.cluster.service.ClusterService$UpdateTask.run(ClusterService.java:965) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:458) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:238) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:201) ~[main/:?]\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_60]\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_60]\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\nCaused by: org.elasticsearch.env.ShardLockObtainFailedException: [test][0]: obtaining shard lock timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:685) ~[main/:?]\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:604) ~[main/:?]\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:298) ~[main/:?]\n ... 
13 more\n[2016-11-18T21:31:06,397][ERROR][o.e.g.TransportNodesListGatewayStartedShards] [node_t0] [test][0] unable to acquire shard lock\norg.elasticsearch.env.ShardLockObtainFailedException: [test][0]: obtaining shard lock timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:685) ~[main/:?]\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:604) ~[main/:?]\n at org.elasticsearch.index.store.Store.tryOpenIndex(Store.java:418) ~[main/:?]\n at org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.nodeOperation(TransportNodesListGatewayStartedShards.java:144) ~[main/:?]\n at org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.nodeOperation(TransportNodesListGatewayStartedShards.java:61) ~[main/:?]\n at org.elasticsearch.action.support.nodes.TransportNodesAction.nodeOperation(TransportNodesAction.java:145) ~[main/:?]\n at org.elasticsearch.action.support.nodes.TransportNodesAction$NodeTransportHandler.messageReceived(TransportNodesAction.java:270) ~[main/:?]\n at org.elasticsearch.action.support.nodes.TransportNodesAction$NodeTransportHandler.messageReceived(TransportNodesAction.java:266) ~[main/:?]\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:66) ~[main/:?]\n at org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:569) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:527) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[main/:?]\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_60]\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_60]\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\n[2016-11-18T21:31:11,415][WARN ][o.e.i.c.IndicesClusterStateService] [node_t0] [[test][0]] marking and sending shard failed due to [failed to create shard]\njava.io.IOException: failed to obtain in-memory shard lock\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:369) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.createShard(IndicesService.java:513) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.createShard(IndicesService.java:147) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.createShard(IndicesClusterStateService.java:539) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.createOrUpdateShards(IndicesClusterStateService.java:516) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:205) ~[main/:?]\n at org.elasticsearch.cluster.service.ClusterService.runTasksForExecutor(ClusterService.java:780) ~[main/:?]\n at org.elasticsearch.cluster.service.ClusterService$UpdateTask.run(ClusterService.java:965) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:458) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:238) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:201) ~[main/:?]\n at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_60]\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_60]\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\nCaused by: org.elasticsearch.env.ShardLockObtainFailedException: [test][0]: obtaining shard lock timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:685) ~[main/:?]\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:604) ~[main/:?]\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:298) ~[main/:?]\n ... 13 more\n[2016-11-18T21:31:11,416][WARN ][o.e.c.a.s.ShardStateAction] [node_t0] [test][0] received shard failed for shard id [[test][0]], allocation id [gyqfvt5TQ2iaA5T66PDmpw], primary term [0], message [failed to create shard], failure [IOException[failed to obtain in-memory shard lock]; nested: ShardLockObtainFailedException[[test][0]: obtaining shard lock timed out after 5000ms]; ]\njava.io.IOException: failed to obtain in-memory shard lock\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:369) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.createShard(IndicesService.java:513) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.createShard(IndicesService.java:147) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.createShard(IndicesClusterStateService.java:539) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.createOrUpdateShards(IndicesClusterStateService.java:516) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:205) ~[main/:?]\n at org.elasticsearch.cluster.service.ClusterService.runTasksForExecutor(ClusterService.java:780) ~[main/:?]\n at org.elasticsearch.cluster.service.ClusterService$UpdateTask.run(ClusterService.java:965) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:458) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:238) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:201) ~[main/:?]\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_60]\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_60]\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\nCaused by: org.elasticsearch.env.ShardLockObtainFailedException: [test][0]: obtaining shard lock timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:685) ~[main/:?]\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:604) ~[main/:?]\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:298) ~[main/:?]\n ... 
13 more\n[2016-11-18T21:31:16,442][ERROR][o.e.g.TransportNodesListGatewayStartedShards] [node_t0] [test][0] unable to acquire shard lock\norg.elasticsearch.env.ShardLockObtainFailedException: [test][0]: obtaining shard lock timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:685) ~[main/:?]\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:604) ~[main/:?]\n at org.elasticsearch.index.store.Store.tryOpenIndex(Store.java:418) ~[main/:?]\n at org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.nodeOperation(TransportNodesListGatewayStartedShards.java:144) ~[main/:?]\n at org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.nodeOperation(TransportNodesListGatewayStartedShards.java:61) ~[main/:?]\n at org.elasticsearch.action.support.nodes.TransportNodesAction.nodeOperation(TransportNodesAction.java:145) ~[main/:?]\n at org.elasticsearch.action.support.nodes.TransportNodesAction$NodeTransportHandler.messageReceived(TransportNodesAction.java:270) ~[main/:?]\n at org.elasticsearch.action.support.nodes.TransportNodesAction$NodeTransportHandler.messageReceived(TransportNodesAction.java:266) ~[main/:?]\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:66) ~[main/:?]\n at org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:569) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:527) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[main/:?]\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_60]\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_60]\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\n[2016-11-18T21:31:21,461][WARN ][o.e.i.c.IndicesClusterStateService] [node_t0] [[test][0]] marking and sending shard failed due to [failed to create shard]\njava.io.IOException: failed to obtain in-memory shard lock\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:369) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.createShard(IndicesService.java:513) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.createShard(IndicesService.java:147) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.createShard(IndicesClusterStateService.java:539) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.createOrUpdateShards(IndicesClusterStateService.java:516) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:205) ~[main/:?]\n at org.elasticsearch.cluster.service.ClusterService.runTasksForExecutor(ClusterService.java:780) ~[main/:?]\n at org.elasticsearch.cluster.service.ClusterService$UpdateTask.run(ClusterService.java:965) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:458) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:238) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:201) ~[main/:?]\n at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_60]\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_60]\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\nCaused by: org.elasticsearch.env.ShardLockObtainFailedException: [test][0]: obtaining shard lock timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:685) ~[main/:?]\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:604) ~[main/:?]\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:298) ~[main/:?]\n ... 13 more\n[2016-11-18T21:31:21,462][WARN ][o.e.c.a.s.ShardStateAction] [node_t0] [test][0] received shard failed for shard id [[test][0]], allocation id [gyqfvt5TQ2iaA5T66PDmpw], primary term [0], message [failed to create shard], failure [IOException[failed to obtain in-memory shard lock]; nested: ShardLockObtainFailedException[[test][0]: obtaining shard lock timed out after 5000ms]; ]\njava.io.IOException: failed to obtain in-memory shard lock\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:369) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.createShard(IndicesService.java:513) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.createShard(IndicesService.java:147) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.createShard(IndicesClusterStateService.java:539) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.createOrUpdateShards(IndicesClusterStateService.java:516) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:205) ~[main/:?]\n at org.elasticsearch.cluster.service.ClusterService.runTasksForExecutor(ClusterService.java:780) ~[main/:?]\n at org.elasticsearch.cluster.service.ClusterService$UpdateTask.run(ClusterService.java:965) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:458) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:238) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:201) ~[main/:?]\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_60]\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_60]\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\nCaused by: org.elasticsearch.env.ShardLockObtainFailedException: [test][0]: obtaining shard lock timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:685) ~[main/:?]\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:604) ~[main/:?]\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:298) ~[main/:?]\n ... 
13 more\n[2016-11-18T21:31:26,474][ERROR][o.e.g.TransportNodesListGatewayStartedShards] [node_t0] [test][0] unable to acquire shard lock\norg.elasticsearch.env.ShardLockObtainFailedException: [test][0]: obtaining shard lock timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:685) ~[main/:?]\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:604) ~[main/:?]\n at org.elasticsearch.index.store.Store.tryOpenIndex(Store.java:418) ~[main/:?]\n at org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.nodeOperation(TransportNodesListGatewayStartedShards.java:144) ~[main/:?]\n at org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.nodeOperation(TransportNodesListGatewayStartedShards.java:61) ~[main/:?]\n at org.elasticsearch.action.support.nodes.TransportNodesAction.nodeOperation(TransportNodesAction.java:145) ~[main/:?]\n at org.elasticsearch.action.support.nodes.TransportNodesAction$NodeTransportHandler.messageReceived(TransportNodesAction.java:270) ~[main/:?]\n at org.elasticsearch.action.support.nodes.TransportNodesAction$NodeTransportHandler.messageReceived(TransportNodesAction.java:266) ~[main/:?]\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:66) ~[main/:?]\n at org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:569) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:527) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[main/:?]\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_60]\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_60]\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\n[2016-11-18T21:31:31,495][WARN ][o.e.i.c.IndicesClusterStateService] [node_t0] [[test][0]] marking and sending shard failed due to [failed to create shard]\njava.io.IOException: failed to obtain in-memory shard lock\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:369) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.createShard(IndicesService.java:513) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.createShard(IndicesService.java:147) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.createShard(IndicesClusterStateService.java:539) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.createOrUpdateShards(IndicesClusterStateService.java:516) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:205) ~[main/:?]\n at org.elasticsearch.cluster.service.ClusterService.runTasksForExecutor(ClusterService.java:780) ~[main/:?]\n at org.elasticsearch.cluster.service.ClusterService$UpdateTask.run(ClusterService.java:965) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:458) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:238) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:201) ~[main/:?]\n at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_60]\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_60]\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\nCaused by: org.elasticsearch.env.ShardLockObtainFailedException: [test][0]: obtaining shard lock timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:685) ~[main/:?]\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:604) ~[main/:?]\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:298) ~[main/:?]\n ... 13 more\n[2016-11-18T21:31:31,496][WARN ][o.e.c.a.s.ShardStateAction] [node_t0] [test][0] received shard failed for shard id [[test][0]], allocation id [gyqfvt5TQ2iaA5T66PDmpw], primary term [0], message [failed to create shard], failure [IOException[failed to obtain in-memory shard lock]; nested: ShardLockObtainFailedException[[test][0]: obtaining shard lock timed out after 5000ms]; ]\njava.io.IOException: failed to obtain in-memory shard lock\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:369) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.createShard(IndicesService.java:513) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.createShard(IndicesService.java:147) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.createShard(IndicesClusterStateService.java:539) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.createOrUpdateShards(IndicesClusterStateService.java:516) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:205) ~[main/:?]\n at org.elasticsearch.cluster.service.ClusterService.runTasksForExecutor(ClusterService.java:780) ~[main/:?]\n at org.elasticsearch.cluster.service.ClusterService$UpdateTask.run(ClusterService.java:965) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:458) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:238) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:201) ~[main/:?]\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_60]\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_60]\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\nCaused by: org.elasticsearch.env.ShardLockObtainFailedException: [test][0]: obtaining shard lock timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:685) ~[main/:?]\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:604) ~[main/:?]\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:298) ~[main/:?]\n ... 
13 more\n[2016-11-18T21:31:36,508][ERROR][o.e.g.TransportNodesListGatewayStartedShards] [node_t0] [test][0] unable to acquire shard lock\norg.elasticsearch.env.ShardLockObtainFailedException: [test][0]: obtaining shard lock timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:685) ~[main/:?]\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:604) ~[main/:?]\n at org.elasticsearch.index.store.Store.tryOpenIndex(Store.java:418) ~[main/:?]\n at org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.nodeOperation(TransportNodesListGatewayStartedShards.java:144) ~[main/:?]\n at org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.nodeOperation(TransportNodesListGatewayStartedShards.java:61) ~[main/:?]\n at org.elasticsearch.action.support.nodes.TransportNodesAction.nodeOperation(TransportNodesAction.java:145) ~[main/:?]\n at org.elasticsearch.action.support.nodes.TransportNodesAction$NodeTransportHandler.messageReceived(TransportNodesAction.java:270) ~[main/:?]\n at org.elasticsearch.action.support.nodes.TransportNodesAction$NodeTransportHandler.messageReceived(TransportNodesAction.java:266) ~[main/:?]\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:66) ~[main/:?]\n at org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:569) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:527) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[main/:?]\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_60]\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_60]\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\n[2016-11-18T21:33:51,399][INFO ][o.e.a.a.i.c.CreateIndexIT] ensureGreen timed out, cluster state:\ncluster uuid: -69XXLcyTXyBzLCeq-LCkQ\nversion: 21\nstate uuid: H1q20JyeRv68cn1FlhZDGg\nfrom_diff: false\nmeta data version: 9\n [test/EwgEjaZoRm6lRojAS1by4A]: v[8]\n 0: p_term [6], isa_ids [gyqfvt5TQ2iaA5T66PDmpw]\nnodes: \n {node_t1}{k-ggqJ2JRi-ql8PwqzzPBg}{xIGyXW7dSEWuXsHS1f8pbA}{127.0.0.1}{127.0.0.1:9401}\n {node_t0}{Smft58veRkeHiF6nXwjwTg}{RLXdAPCgQm2IV1bb4I7Ndw}{127.0.0.1}{127.0.0.1:9400}, master\n {node_tc2}{gN9UaHrxRpS0T2AJ8hJuGQ}{wVYUDMB0TtCoR3gWIdWXcA}{127.0.0.1}{127.0.0.1:9402}\nrouting_table (version 18):\n-- index [[test/EwgEjaZoRm6lRojAS1by4A]]\n----shard_id [test][0]\n--------[test][0], node[null], [P], recovery_source[existing recovery], s[UNASSIGNED], unassigned_info[[reason=ALLOCATION_FAILED], at[2016-11-18T19:31:31.499Z], failed_attempts[5], delayed=false, details[failed to create shard, failure IOException[failed to obtain in-memory shard lock]; nested: NotSerializableExceptionWrapper[shard_lock_obtain_failed_exception: [test][0]: obtaining shard lock timed out after 5000ms]; ], allocation_status[deciders_no]]\n\nrouting_nodes:\n-----node_id[Smft58veRkeHiF6nXwjwTg][V]\n-----node_id[k-ggqJ2JRi-ql8PwqzzPBg][V]\n---- unassigned\n--------[test][0], node[null], [P], recovery_source[existing recovery], s[UNASSIGNED], unassigned_info[[reason=ALLOCATION_FAILED], at[2016-11-18T19:31:31.499Z], failed_attempts[5], delayed=false, details[failed to create shard, failure IOException[failed to obtain in-memory shard lock]; nested: 
NotSerializableExceptionWrapper[shard_lock_obtain_failed_exception: [test][0]: obtaining shard lock timed out after 5000ms]; ], allocation_status[deciders_no]]\n\ntasks: (0):\n\n[2016-11-18T21:33:51,399][INFO ][o.e.a.a.i.c.CreateIndexIT] [CreateIndexIT#testWeirdScenario]: finished test\n[2016-11-18T21:33:51,400][INFO ][o.e.a.a.i.c.CreateIndexIT] [CreateIndexIT#testWeirdScenario]: cleaning up after test\n[2016-11-18T21:33:51,467][WARN ][o.e.i.IndicesService ] [node_t0] org.elasticsearch.indices.IndicesService$$Lambda$1323/1008235371@45d780ea\norg.elasticsearch.env.ShardLockObtainFailedException: [test][0]: obtaining shard lock timed out after 0ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:685) ~[main/:?]\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:604) ~[main/:?]\n at org.elasticsearch.env.NodeEnvironment.lockAllForIndex(NodeEnvironment.java:550) ~[main/:?]\n at org.elasticsearch.env.NodeEnvironment.deleteIndexDirectorySafe(NodeEnvironment.java:501) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.deleteIndexStoreIfDeletionAllowed(IndicesService.java:702) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.deleteIndexStore(IndicesService.java:689) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.deleteIndexStore(IndicesService.java:684) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.deleteUnassignedIndex(IndicesService.java:652) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.deleteIndices(IndicesClusterStateService.java:264) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:193) ~[main/:?]\n at org.elasticsearch.cluster.service.ClusterService.runTasksForExecutor(ClusterService.java:780) ~[main/:?]\n at org.elasticsearch.cluster.service.ClusterService$UpdateTask.run(ClusterService.java:965) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:458) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:238) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:201) ~[main/:?]\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_60]\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_60]\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\n[2016-11-18T21:33:51,506][INFO ][o.e.n.Node ] [node_t0] stopping ...\n[2016-11-18T21:33:51,509][INFO ][o.e.t.d.TestZenDiscovery ] [node_t1] master_left [{node_t0}{Smft58veRkeHiF6nXwjwTg}{RLXdAPCgQm2IV1bb4I7Ndw}{127.0.0.1}{127.0.0.1:9400}], reason [shut_down]\n[2016-11-18T21:33:51,510][WARN ][o.e.t.d.TestZenDiscovery ] [node_t1] master left (reason = shut_down), current nodes: nodes: \n {node_t1}{k-ggqJ2JRi-ql8PwqzzPBg}{xIGyXW7dSEWuXsHS1f8pbA}{127.0.0.1}{127.0.0.1:9401}, local\n {node_t0}{Smft58veRkeHiF6nXwjwTg}{RLXdAPCgQm2IV1bb4I7Ndw}{127.0.0.1}{127.0.0.1:9400}, master\n {node_tc2}{gN9UaHrxRpS0T2AJ8hJuGQ}{wVYUDMB0TtCoR3gWIdWXcA}{127.0.0.1}{127.0.0.1:9402}\n\n[2016-11-18T21:33:51,511][INFO ][o.e.t.d.MockZenPing ] [node_t1] pinging using mock zen ping\n[2016-11-18T21:33:51,517][INFO ][o.e.t.d.TestZenDiscovery ] [node_tc2] master_left 
[{node_t0}{Smft58veRkeHiF6nXwjwTg}{RLXdAPCgQm2IV1bb4I7Ndw}{127.0.0.1}{127.0.0.1:9400}], reason [transport disconnected]\n[2016-11-18T21:33:51,518][WARN ][o.e.t.d.TestZenDiscovery ] [node_tc2] master left (reason = transport disconnected), current nodes: nodes: \n {node_t1}{k-ggqJ2JRi-ql8PwqzzPBg}{xIGyXW7dSEWuXsHS1f8pbA}{127.0.0.1}{127.0.0.1:9401}\n {node_t0}{Smft58veRkeHiF6nXwjwTg}{RLXdAPCgQm2IV1bb4I7Ndw}{127.0.0.1}{127.0.0.1:9400}, master\n {node_tc2}{gN9UaHrxRpS0T2AJ8hJuGQ}{wVYUDMB0TtCoR3gWIdWXcA}{127.0.0.1}{127.0.0.1:9402}, local\n\n[2016-11-18T21:33:51,520][INFO ][o.e.t.d.MockZenPing ] [node_tc2] pinging using mock zen ping\n[2016-11-18T21:33:51,521][INFO ][o.e.n.Node ] [node_t0] stopped\n[2016-11-18T21:33:51,521][INFO ][o.e.n.Node ] [node_t0] closing ...\n[2016-11-18T21:33:54,516][INFO ][o.e.t.d.MockZenPing ] [node_t1] pinging using mock zen ping\n[2016-11-18T21:33:54,526][INFO ][o.e.t.d.MockZenPing ] [node_tc2] pinging using mock zen ping\n[2016-11-18T21:33:57,518][INFO ][o.e.t.d.MockZenPing ] [node_t1] pinging using mock zen ping\n[2016-11-18T21:33:57,530][INFO ][o.e.t.d.MockZenPing ] [node_tc2] pinging using mock zen ping\n[2016-11-18T21:34:00,107][WARN ][o.e.c.NodeConnectionsService] [node_t1] failed to connect to node {node_t0}{Smft58veRkeHiF6nXwjwTg}{RLXdAPCgQm2IV1bb4I7Ndw}{127.0.0.1}{127.0.0.1:9400} (tried [1] times)\norg.elasticsearch.transport.ConnectTransportException: [node_t0][127.0.0.1:9400] general node connection failure\n at org.elasticsearch.transport.TcpTransport.connectToNode(TcpTransport.java:431) ~[main/:?]\n at org.elasticsearch.transport.TcpTransport.connectToNode(TcpTransport.java:387) ~[main/:?]\n at org.elasticsearch.transport.TransportService.connectToNode(TransportService.java:290) ~[main/:?]\n at org.elasticsearch.cluster.NodeConnectionsService.validateNodeConnected(NodeConnectionsService.java:113) ~[main/:?]\n at org.elasticsearch.cluster.NodeConnectionsService$ConnectionChecker.doRun(NodeConnectionsService.java:142) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:527) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[main/:?]\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_60]\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_60]\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\nCaused by: java.net.ConnectException: Connection refused\n at java.net.PlainSocketImpl.socketConnect(Native Method) ~[?:1.8.0_60]\n at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:350) ~[?:1.8.0_60]\n at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:206) ~[?:1.8.0_60]\n at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:188) ~[?:1.8.0_60]\n at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392) ~[?:1.8.0_60]\n at java.net.Socket.connect(Socket.java:589) ~[?:1.8.0_60]\n at org.elasticsearch.transport.MockTcpTransport.connectToChannels(MockTcpTransport.java:189) ~[main/:?]\n at org.elasticsearch.transport.TcpTransport.connectToNode(TcpTransport.java:413) ~[main/:?]\n ... 
9 more\n[2016-11-18T21:34:00,191][WARN ][o.e.c.NodeConnectionsService] [node_tc2] failed to connect to node {node_t0}{Smft58veRkeHiF6nXwjwTg}{RLXdAPCgQm2IV1bb4I7Ndw}{127.0.0.1}{127.0.0.1:9400} (tried [1] times)\norg.elasticsearch.transport.ConnectTransportException: [node_t0][127.0.0.1:9400] general node connection failure\n at org.elasticsearch.transport.TcpTransport.connectToNode(TcpTransport.java:431) ~[main/:?]\n at org.elasticsearch.transport.TcpTransport.connectToNode(TcpTransport.java:387) ~[main/:?]\n at org.elasticsearch.transport.TransportService.connectToNode(TransportService.java:290) ~[main/:?]\n at org.elasticsearch.cluster.NodeConnectionsService.validateNodeConnected(NodeConnectionsService.java:113) ~[main/:?]\n at org.elasticsearch.cluster.NodeConnectionsService$ConnectionChecker.doRun(NodeConnectionsService.java:142) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:527) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[main/:?]\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_60]\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_60]\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\nCaused by: java.net.ConnectException: Connection refused\n at java.net.PlainSocketImpl.socketConnect(Native Method) ~[?:1.8.0_60]\n at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:350) ~[?:1.8.0_60]\n at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:206) ~[?:1.8.0_60]\n at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:188) ~[?:1.8.0_60]\n at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392) ~[?:1.8.0_60]\n at java.net.Socket.connect(Socket.java:589) ~[?:1.8.0_60]\n at org.elasticsearch.transport.MockTcpTransport.connectToChannels(MockTcpTransport.java:189) ~[main/:?]\n at org.elasticsearch.transport.TcpTransport.connectToNode(TcpTransport.java:413) ~[main/:?]\n ... 
9 more\n[2016-11-18T21:34:00,521][INFO ][o.e.t.d.MockZenPing ] [node_t1] pinging using mock zen ping\n[2016-11-18T21:34:00,537][INFO ][o.e.t.d.MockZenPing ] [node_tc2] pinging using mock zen ping\n[2016-11-18T21:34:01,534][INFO ][o.e.n.Node ] [node_t0] closed\n[2016-11-18T21:34:01,534][WARN ][o.e.i.c.IndicesClusterStateService] [node_t0] [[test/EwgEjaZoRm6lRojAS1by4A]] failed to complete pending deletion for index\norg.elasticsearch.env.ShardLockObtainFailedException: [test][0]: thread interrupted while trying to obtain shard lock\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:690) ~[main/:?]\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:604) ~[main/:?]\n at org.elasticsearch.env.NodeEnvironment.lockAllForIndex(NodeEnvironment.java:550) ~[main/:?]\n at org.elasticsearch.indices.IndicesService.processPendingDeletes(IndicesService.java:977) ~[main/:?]\n at org.elasticsearch.indices.cluster.IndicesClusterStateService$2.doRun(IndicesClusterStateService.java:295) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:527) ~[main/:?]\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[main/:?]\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_60]\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_60]\n at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_60]\nCaused by: java.lang.InterruptedException\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.doAcquireSharedNanos(AbstractQueuedSynchronizer.java:1039) ~[?:1.8.0_60]\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.tryAcquireSharedNanos(AbstractQueuedSynchronizer.java:1328) ~[?:1.8.0_60]\n at java.util.concurrent.Semaphore.tryAcquire(Semaphore.java:409) ~[?:1.8.0_60]\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:684) ~[main/:?]\n ... 
9 more\n[2016-11-18T21:34:01,541][INFO ][o.e.n.Node ] [node_t1] stopping ...\n[2016-11-18T21:34:01,541][INFO ][o.e.t.d.MockZenPing ] [node_tc2] pinging using mock zen ping\n[2016-11-18T21:34:01,555][INFO ][o.e.n.Node ] [node_t1] stopped\n[2016-11-18T21:34:01,555][INFO ][o.e.n.Node ] [node_t1] closing ...\n[2016-11-18T21:34:01,559][INFO ][o.e.n.Node ] [node_t1] closed\n[2016-11-18T21:34:01,562][INFO ][o.e.n.Node ] [node_tc2] stopping ...\n[2016-11-18T21:34:01,567][INFO ][o.e.n.Node ] [node_tc2] stopped\n[2016-11-18T21:34:01,567][INFO ][o.e.n.Node ] [node_tc2] closing ...\n[2016-11-18T21:34:01,569][INFO ][o.e.n.Node ] [node_tc2] closed\n[2016-11-18T21:34:01,569][INFO ][o.e.a.a.i.c.CreateIndexIT] [CreateIndexIT#testWeirdScenario]: cleaned up after test\n\njava.lang.AssertionError: timed out waiting for green state\n\n at __randomizedtesting.SeedInfo.seed([9FA981A46E11DF3D:271547EC56C03403]:0)\n at org.junit.Assert.fail(Assert.java:88)\n at org.elasticsearch.test.ESIntegTestCase.ensureColor(ESIntegTestCase.java:925)\n at org.elasticsearch.test.ESIntegTestCase.ensureGreen(ESIntegTestCase.java:891)\n at org.elasticsearch.action.admin.indices.create.CreateIndexIT.testWeirdScenario(CreateIndexIT.java:98)\n at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\n at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\n at java.lang.reflect.Method.invoke(Method.java:497)\n at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1713)\n at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:907)\n at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:943)\n at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:957)\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)\n at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)\n at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)\n at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)\n at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)\n at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)\n at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:367)\n at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:811)\n at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:462)\n at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:916)\n at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:802)\n at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:852)\n at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863)\n at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)\n at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)\n at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)\n at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)\n at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)\n at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)\n at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)\n at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)\n at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)\n at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:367)\n at java.lang.Thread.run(Thread.java:745)\n\nREPRODUCE WITH: gradle null -Dtests.seed=9FA981A46E11DF3D -Dtests.class=org.elasticsearch.action.admin.indices.create.CreateIndexIT -Dtests.method=\"testWeirdScenario\" -Dtests.locale=en-AU -Dtests.timezone=Europe/Istanbul\nNOTE: leaving temporary files on disk at: /private/var/folders/68/3gzf12zs4qb0q_gfjw5lx1fm0000gn/T/org.elasticsearch.action.admin.indices.create.CreateIndexIT_9FA981A46E11DF3D-001\nNOTE: test params are: codec=Asserting(Lucene62): {}, docValues:{}, maxPointsInLeafNode=1098, maxMBSortInHeap=5.330974165069652, sim=ClassicSimilarity, locale=en-AU, timezone=Europe/Istanbul\nNOTE: Mac OS X 10.12.1 x86_64/Oracle Corporation 1.8.0_60 (64-bit)/cpus=4,threads=1,free=211766840,total=311427072\nNOTE: All tests run in this JVM: [CreateIndexIT]\n\nProcess finished with exit code 255\n```\n", "created_at": "2016-11-18T19:43:17Z" }, { "body": "@ywelsch thanks for confirming\n", "created_at": "2016-11-18T19:46:47Z" }, { "body": "Playing around with the above test case I noticed that `ShardLockObtainFailedException` was not serializable, which is fixed by d8a6b91.\n", "created_at": "2016-11-18T20:26:21Z" }, { "body": "> I noticed that ShardLockObtainFailedException was not serializable, which is fixed by d8a6b91.\n\nWouldn't that make this non-backport-able, since 5.0 won't have the serialization logic for this exception?\n", "created_at": "2016-11-18T20:27:18Z" }, { "body": "> Wouldn't that make this non-backport-able, since 5.0 won't have the serialization logic for this exception?\n\ncorrect 😞 We need to rethink this. Any suggestions?\n", "created_at": "2016-11-18T20:59:15Z" }, { "body": "> Any suggestions?\n\nI think we have 3 options\n1) Don't return the shard lock exception as a store exception, with the down sides you described.\n2) Build a BWC layer into NodeGatewayStartedShards#writeTo translating that exception into an IOException (and the same for the read side)\n3) Build a BWC layer intot he exception handling logic , which we will need at some point anyway.\n\nGiven the time frame of this and the aim to have it in 5.1, I tend towards option 2.\n", "created_at": "2016-11-19T17:23:47Z" }, { "body": "@s1monw has added a BWC layer for exceptions in #21694. 
I've rebased this PR so that it includes his changes -> It's ready for review again.", "created_at": "2016-11-21T12:18:32Z" }, { "body": "Can I get another review on this?", "created_at": "2016-11-22T09:01:03Z" }, { "body": "@bleskes I've pushed 3e64d74 addressing comments.", "created_at": "2016-11-22T16:09:02Z" }, { "body": "Thanks @bleskes @abeyad @dakrone ", "created_at": "2016-11-22T18:47:22Z" } ], "number": 21656, "title": "Allow master to assign primary shard to node that has shard store locked during shard state fetching" }
{ "body": "Today it's not possible to add exceptions to the serialization layer\r\nwithout breaking BWC. This commit adds the ability to specify the Version\r\nan exception was added that allows to fall back not NotSerializableExceptionWrapper\r\nif the exception is not present in the streams version.\r\n\r\nRelates to #21656", "number": 21694, "review_comments": [ { "body": "This is a tricky change to make and I want to understand the full consequences first. If I follow the flow of the code correctly, this code is called when a master wants to allocate a replica and checks if there are nodes that have data for the replica. Making this change here means that the fetching for this node will be considered as failed and will trigger a reroute in `AsyncShardFetch`. The replica might however still be allocated in `ReplicaShardAllocator` to another node or later allocated on any node in `BalancedShardsAllocator`. I'm not sure if that's the change we want. Can you leave this code as is for now?", "created_at": "2016-11-21T10:04:16Z" }, { "body": "ok sure - I copied this from your commit 👯‍♂️ ", "created_at": "2016-11-21T11:34:43Z" } ], "title": "Add BWC layer for Exceptions" }
{ "commits": [ { "message": "Add BWC layer for Exceptions\n\nToday it's not possible to add exceptions to the serialization layer\nwithout breaking BWC. This commit adds the ability to specify the Version\nan exception was added that allows to fall back not NotSerializableExceptionWrapper\nif the expection is not present in the streams version.\n\nRelates to #21656" }, { "message": "fix line len" }, { "message": "rollback changes to Store.java" } ], "files": [ { "diff": "@@ -49,6 +49,8 @@\n */\n public class ElasticsearchException extends RuntimeException implements ToXContent, Writeable {\n \n+ public static final Version V_5_1_0_UNRELEASED = Version.fromId(5010099);\n+ public static final Version UNKNOWN_VERSION_ADDED = Version.fromId(0);\n public static final String REST_EXCEPTION_SKIP_CAUSE = \"rest.exception.cause.skip\";\n public static final String REST_EXCEPTION_SKIP_STACK_TRACE = \"rest.exception.stacktrace.skip\";\n public static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT = true;\n@@ -210,8 +212,12 @@ public static ElasticsearchException readException(StreamInput input, int id) th\n /**\n * Returns <code>true</code> iff the given class is a registered for an exception to be read.\n */\n- public static boolean isRegistered(Class<? extends Throwable> exception) {\n- return CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE.containsKey(exception);\n+ public static boolean isRegistered(Class<? extends Throwable> exception, Version version) {\n+ ElasticsearchExceptionHandle elasticsearchExceptionHandle = CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE.get(exception);\n+ if (elasticsearchExceptionHandle != null) {\n+ return version.onOrAfter(elasticsearchExceptionHandle.versionAdded);\n+ }\n+ return false;\n }\n \n static Set<Class<? extends ElasticsearchException>> getRegisteredKeys() { // for testing\n@@ -432,279 +438,294 @@ public static <T extends Throwable> T writeStackTraces(T throwable, StreamOutput\n */\n enum ElasticsearchExceptionHandle {\n INDEX_SHARD_SNAPSHOT_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException.class,\n- org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException::new, 0),\n+ org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException::new, 0, UNKNOWN_VERSION_ADDED),\n DFS_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.dfs.DfsPhaseExecutionException.class,\n- org.elasticsearch.search.dfs.DfsPhaseExecutionException::new, 1),\n+ org.elasticsearch.search.dfs.DfsPhaseExecutionException::new, 1, UNKNOWN_VERSION_ADDED),\n EXECUTION_CANCELLED_EXCEPTION(org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException.class,\n- org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException::new, 2),\n+ org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException::new, 2, UNKNOWN_VERSION_ADDED),\n MASTER_NOT_DISCOVERED_EXCEPTION(org.elasticsearch.discovery.MasterNotDiscoveredException.class,\n- org.elasticsearch.discovery.MasterNotDiscoveredException::new, 3),\n+ org.elasticsearch.discovery.MasterNotDiscoveredException::new, 3, UNKNOWN_VERSION_ADDED),\n ELASTICSEARCH_SECURITY_EXCEPTION(org.elasticsearch.ElasticsearchSecurityException.class,\n- org.elasticsearch.ElasticsearchSecurityException::new, 4),\n+ org.elasticsearch.ElasticsearchSecurityException::new, 4, UNKNOWN_VERSION_ADDED),\n INDEX_SHARD_RESTORE_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreException.class,\n- org.elasticsearch.index.snapshots.IndexShardRestoreException::new, 5),\n+ 
org.elasticsearch.index.snapshots.IndexShardRestoreException::new, 5, UNKNOWN_VERSION_ADDED),\n INDEX_CLOSED_EXCEPTION(org.elasticsearch.indices.IndexClosedException.class,\n- org.elasticsearch.indices.IndexClosedException::new, 6),\n+ org.elasticsearch.indices.IndexClosedException::new, 6, UNKNOWN_VERSION_ADDED),\n BIND_HTTP_EXCEPTION(org.elasticsearch.http.BindHttpException.class,\n- org.elasticsearch.http.BindHttpException::new, 7),\n+ org.elasticsearch.http.BindHttpException::new, 7, UNKNOWN_VERSION_ADDED),\n REDUCE_SEARCH_PHASE_EXCEPTION(org.elasticsearch.action.search.ReduceSearchPhaseException.class,\n- org.elasticsearch.action.search.ReduceSearchPhaseException::new, 8),\n+ org.elasticsearch.action.search.ReduceSearchPhaseException::new, 8, UNKNOWN_VERSION_ADDED),\n NODE_CLOSED_EXCEPTION(org.elasticsearch.node.NodeClosedException.class,\n- org.elasticsearch.node.NodeClosedException::new, 9),\n+ org.elasticsearch.node.NodeClosedException::new, 9, UNKNOWN_VERSION_ADDED),\n SNAPSHOT_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.SnapshotFailedEngineException.class,\n- org.elasticsearch.index.engine.SnapshotFailedEngineException::new, 10),\n+ org.elasticsearch.index.engine.SnapshotFailedEngineException::new, 10, UNKNOWN_VERSION_ADDED),\n SHARD_NOT_FOUND_EXCEPTION(org.elasticsearch.index.shard.ShardNotFoundException.class,\n- org.elasticsearch.index.shard.ShardNotFoundException::new, 11),\n+ org.elasticsearch.index.shard.ShardNotFoundException::new, 11, UNKNOWN_VERSION_ADDED),\n CONNECT_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ConnectTransportException.class,\n- org.elasticsearch.transport.ConnectTransportException::new, 12),\n+ org.elasticsearch.transport.ConnectTransportException::new, 12, UNKNOWN_VERSION_ADDED),\n NOT_SERIALIZABLE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.NotSerializableTransportException.class,\n- org.elasticsearch.transport.NotSerializableTransportException::new, 13),\n+ org.elasticsearch.transport.NotSerializableTransportException::new, 13, UNKNOWN_VERSION_ADDED),\n RESPONSE_HANDLER_FAILURE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ResponseHandlerFailureTransportException.class,\n- org.elasticsearch.transport.ResponseHandlerFailureTransportException::new, 14),\n+ org.elasticsearch.transport.ResponseHandlerFailureTransportException::new, 14, UNKNOWN_VERSION_ADDED),\n INDEX_CREATION_EXCEPTION(org.elasticsearch.indices.IndexCreationException.class,\n- org.elasticsearch.indices.IndexCreationException::new, 15),\n+ org.elasticsearch.indices.IndexCreationException::new, 15, UNKNOWN_VERSION_ADDED),\n INDEX_NOT_FOUND_EXCEPTION(org.elasticsearch.index.IndexNotFoundException.class,\n- org.elasticsearch.index.IndexNotFoundException::new, 16),\n+ org.elasticsearch.index.IndexNotFoundException::new, 16, UNKNOWN_VERSION_ADDED),\n ILLEGAL_SHARD_ROUTING_STATE_EXCEPTION(org.elasticsearch.cluster.routing.IllegalShardRoutingStateException.class,\n- org.elasticsearch.cluster.routing.IllegalShardRoutingStateException::new, 17),\n+ org.elasticsearch.cluster.routing.IllegalShardRoutingStateException::new, 17, UNKNOWN_VERSION_ADDED),\n BROADCAST_SHARD_OPERATION_FAILED_EXCEPTION(org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException.class,\n- org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException::new, 18),\n+ org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException::new, 18, UNKNOWN_VERSION_ADDED),\n RESOURCE_NOT_FOUND_EXCEPTION(org.elasticsearch.ResourceNotFoundException.class,\n- 
org.elasticsearch.ResourceNotFoundException::new, 19),\n+ org.elasticsearch.ResourceNotFoundException::new, 19, UNKNOWN_VERSION_ADDED),\n ACTION_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionTransportException.class,\n- org.elasticsearch.transport.ActionTransportException::new, 20),\n+ org.elasticsearch.transport.ActionTransportException::new, 20, UNKNOWN_VERSION_ADDED),\n ELASTICSEARCH_GENERATION_EXCEPTION(org.elasticsearch.ElasticsearchGenerationException.class,\n- org.elasticsearch.ElasticsearchGenerationException::new, 21),\n+ org.elasticsearch.ElasticsearchGenerationException::new, 21, UNKNOWN_VERSION_ADDED),\n // 22 was CreateFailedEngineException\n INDEX_SHARD_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardStartedException.class,\n- org.elasticsearch.index.shard.IndexShardStartedException::new, 23),\n+ org.elasticsearch.index.shard.IndexShardStartedException::new, 23, UNKNOWN_VERSION_ADDED),\n SEARCH_CONTEXT_MISSING_EXCEPTION(org.elasticsearch.search.SearchContextMissingException.class,\n- org.elasticsearch.search.SearchContextMissingException::new, 24),\n+ org.elasticsearch.search.SearchContextMissingException::new, 24, UNKNOWN_VERSION_ADDED),\n GENERAL_SCRIPT_EXCEPTION(org.elasticsearch.script.GeneralScriptException.class,\n- org.elasticsearch.script.GeneralScriptException::new, 25),\n+ org.elasticsearch.script.GeneralScriptException::new, 25, UNKNOWN_VERSION_ADDED),\n BATCH_OPERATION_EXCEPTION(org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class,\n- org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException::new, 26),\n+ org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException::new, 26, UNKNOWN_VERSION_ADDED),\n SNAPSHOT_CREATION_EXCEPTION(org.elasticsearch.snapshots.SnapshotCreationException.class,\n- org.elasticsearch.snapshots.SnapshotCreationException::new, 27),\n- DELETE_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.DeleteFailedEngineException.class,\n- org.elasticsearch.index.engine.DeleteFailedEngineException::new, 28),// deprecated in 6.0, remove in 7.0\n+ org.elasticsearch.snapshots.SnapshotCreationException::new, 27, UNKNOWN_VERSION_ADDED),\n+ DELETE_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.DeleteFailedEngineException.class, // deprecated in 6.0, remove in 7.0\n+ org.elasticsearch.index.engine.DeleteFailedEngineException::new, 28, UNKNOWN_VERSION_ADDED),\n DOCUMENT_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentMissingException.class,\n- org.elasticsearch.index.engine.DocumentMissingException::new, 29),\n+ org.elasticsearch.index.engine.DocumentMissingException::new, 29, UNKNOWN_VERSION_ADDED),\n SNAPSHOT_EXCEPTION(org.elasticsearch.snapshots.SnapshotException.class,\n- org.elasticsearch.snapshots.SnapshotException::new, 30),\n+ org.elasticsearch.snapshots.SnapshotException::new, 30, UNKNOWN_VERSION_ADDED),\n INVALID_ALIAS_NAME_EXCEPTION(org.elasticsearch.indices.InvalidAliasNameException.class,\n- org.elasticsearch.indices.InvalidAliasNameException::new, 31),\n+ org.elasticsearch.indices.InvalidAliasNameException::new, 31, UNKNOWN_VERSION_ADDED),\n INVALID_INDEX_NAME_EXCEPTION(org.elasticsearch.indices.InvalidIndexNameException.class,\n- org.elasticsearch.indices.InvalidIndexNameException::new, 32),\n+ org.elasticsearch.indices.InvalidIndexNameException::new, 32, UNKNOWN_VERSION_ADDED),\n INDEX_PRIMARY_SHARD_NOT_ALLOCATED_EXCEPTION(org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException.class,\n- 
org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException::new, 33),\n+ org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException::new, 33, UNKNOWN_VERSION_ADDED),\n TRANSPORT_EXCEPTION(org.elasticsearch.transport.TransportException.class,\n- org.elasticsearch.transport.TransportException::new, 34),\n+ org.elasticsearch.transport.TransportException::new, 34, UNKNOWN_VERSION_ADDED),\n ELASTICSEARCH_PARSE_EXCEPTION(org.elasticsearch.ElasticsearchParseException.class,\n- org.elasticsearch.ElasticsearchParseException::new, 35),\n+ org.elasticsearch.ElasticsearchParseException::new, 35, UNKNOWN_VERSION_ADDED),\n SEARCH_EXCEPTION(org.elasticsearch.search.SearchException.class,\n- org.elasticsearch.search.SearchException::new, 36),\n+ org.elasticsearch.search.SearchException::new, 36, UNKNOWN_VERSION_ADDED),\n MAPPER_EXCEPTION(org.elasticsearch.index.mapper.MapperException.class,\n- org.elasticsearch.index.mapper.MapperException::new, 37),\n+ org.elasticsearch.index.mapper.MapperException::new, 37, UNKNOWN_VERSION_ADDED),\n INVALID_TYPE_NAME_EXCEPTION(org.elasticsearch.indices.InvalidTypeNameException.class,\n- org.elasticsearch.indices.InvalidTypeNameException::new, 38),\n+ org.elasticsearch.indices.InvalidTypeNameException::new, 38, UNKNOWN_VERSION_ADDED),\n SNAPSHOT_RESTORE_EXCEPTION(org.elasticsearch.snapshots.SnapshotRestoreException.class,\n- org.elasticsearch.snapshots.SnapshotRestoreException::new, 39),\n- PARSING_EXCEPTION(org.elasticsearch.common.ParsingException.class, org.elasticsearch.common.ParsingException::new, 40),\n+ org.elasticsearch.snapshots.SnapshotRestoreException::new, 39, UNKNOWN_VERSION_ADDED),\n+ PARSING_EXCEPTION(org.elasticsearch.common.ParsingException.class, org.elasticsearch.common.ParsingException::new, 40,\n+ UNKNOWN_VERSION_ADDED),\n INDEX_SHARD_CLOSED_EXCEPTION(org.elasticsearch.index.shard.IndexShardClosedException.class,\n- org.elasticsearch.index.shard.IndexShardClosedException::new, 41),\n+ org.elasticsearch.index.shard.IndexShardClosedException::new, 41, UNKNOWN_VERSION_ADDED),\n RECOVER_FILES_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.RecoverFilesRecoveryException.class,\n- org.elasticsearch.indices.recovery.RecoverFilesRecoveryException::new, 42),\n+ org.elasticsearch.indices.recovery.RecoverFilesRecoveryException::new, 42, UNKNOWN_VERSION_ADDED),\n TRUNCATED_TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TruncatedTranslogException.class,\n- org.elasticsearch.index.translog.TruncatedTranslogException::new, 43),\n+ org.elasticsearch.index.translog.TruncatedTranslogException::new, 43, UNKNOWN_VERSION_ADDED),\n RECOVERY_FAILED_EXCEPTION(org.elasticsearch.indices.recovery.RecoveryFailedException.class,\n- org.elasticsearch.indices.recovery.RecoveryFailedException::new, 44),\n+ org.elasticsearch.indices.recovery.RecoveryFailedException::new, 44, UNKNOWN_VERSION_ADDED),\n INDEX_SHARD_RELOCATED_EXCEPTION(org.elasticsearch.index.shard.IndexShardRelocatedException.class,\n- org.elasticsearch.index.shard.IndexShardRelocatedException::new, 45),\n+ org.elasticsearch.index.shard.IndexShardRelocatedException::new, 45, UNKNOWN_VERSION_ADDED),\n NODE_SHOULD_NOT_CONNECT_EXCEPTION(org.elasticsearch.transport.NodeShouldNotConnectException.class,\n- org.elasticsearch.transport.NodeShouldNotConnectException::new, 46),\n+ org.elasticsearch.transport.NodeShouldNotConnectException::new, 46, UNKNOWN_VERSION_ADDED),\n // 47 used to be for IndexTemplateAlreadyExistsException which was deprecated in 5.1 removed in 6.0\n 
TRANSLOG_CORRUPTED_EXCEPTION(org.elasticsearch.index.translog.TranslogCorruptedException.class,\n- org.elasticsearch.index.translog.TranslogCorruptedException::new, 48),\n+ org.elasticsearch.index.translog.TranslogCorruptedException::new, 48, UNKNOWN_VERSION_ADDED),\n CLUSTER_BLOCK_EXCEPTION(org.elasticsearch.cluster.block.ClusterBlockException.class,\n- org.elasticsearch.cluster.block.ClusterBlockException::new, 49),\n+ org.elasticsearch.cluster.block.ClusterBlockException::new, 49, UNKNOWN_VERSION_ADDED),\n FETCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.fetch.FetchPhaseExecutionException.class,\n- org.elasticsearch.search.fetch.FetchPhaseExecutionException::new, 50),\n+ org.elasticsearch.search.fetch.FetchPhaseExecutionException::new, 50, UNKNOWN_VERSION_ADDED),\n // 51 used to be for IndexShardAlreadyExistsException which was deprecated in 5.1 removed in 6.0\n VERSION_CONFLICT_ENGINE_EXCEPTION(org.elasticsearch.index.engine.VersionConflictEngineException.class,\n- org.elasticsearch.index.engine.VersionConflictEngineException::new, 52),\n- ENGINE_EXCEPTION(org.elasticsearch.index.engine.EngineException.class, org.elasticsearch.index.engine.EngineException::new, 53),\n+ org.elasticsearch.index.engine.VersionConflictEngineException::new, 52, UNKNOWN_VERSION_ADDED),\n+ ENGINE_EXCEPTION(org.elasticsearch.index.engine.EngineException.class, org.elasticsearch.index.engine.EngineException::new, 53,\n+ UNKNOWN_VERSION_ADDED),\n // 54 was DocumentAlreadyExistsException, which is superseded by VersionConflictEngineException\n- NO_SUCH_NODE_EXCEPTION(org.elasticsearch.action.NoSuchNodeException.class, org.elasticsearch.action.NoSuchNodeException::new, 55),\n+ NO_SUCH_NODE_EXCEPTION(org.elasticsearch.action.NoSuchNodeException.class, org.elasticsearch.action.NoSuchNodeException::new, 55,\n+ UNKNOWN_VERSION_ADDED),\n SETTINGS_EXCEPTION(org.elasticsearch.common.settings.SettingsException.class,\n- org.elasticsearch.common.settings.SettingsException::new, 56),\n+ org.elasticsearch.common.settings.SettingsException::new, 56, UNKNOWN_VERSION_ADDED),\n INDEX_TEMPLATE_MISSING_EXCEPTION(org.elasticsearch.indices.IndexTemplateMissingException.class,\n- org.elasticsearch.indices.IndexTemplateMissingException::new, 57),\n+ org.elasticsearch.indices.IndexTemplateMissingException::new, 57, UNKNOWN_VERSION_ADDED),\n SEND_REQUEST_TRANSPORT_EXCEPTION(org.elasticsearch.transport.SendRequestTransportException.class,\n- org.elasticsearch.transport.SendRequestTransportException::new, 58),\n+ org.elasticsearch.transport.SendRequestTransportException::new, 58, UNKNOWN_VERSION_ADDED),\n ES_REJECTED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException.class,\n- org.elasticsearch.common.util.concurrent.EsRejectedExecutionException::new, 59),\n+ org.elasticsearch.common.util.concurrent.EsRejectedExecutionException::new, 59, UNKNOWN_VERSION_ADDED),\n EARLY_TERMINATION_EXCEPTION(org.elasticsearch.common.lucene.Lucene.EarlyTerminationException.class,\n- org.elasticsearch.common.lucene.Lucene.EarlyTerminationException::new, 60),\n+ org.elasticsearch.common.lucene.Lucene.EarlyTerminationException::new, 60, UNKNOWN_VERSION_ADDED),\n // 61 used to be for RoutingValidationException\n NOT_SERIALIZABLE_EXCEPTION_WRAPPER(org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.class,\n- org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper::new, 62),\n+ org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper::new, 62, UNKNOWN_VERSION_ADDED),\n 
ALIAS_FILTER_PARSING_EXCEPTION(org.elasticsearch.indices.AliasFilterParsingException.class,\n- org.elasticsearch.indices.AliasFilterParsingException::new, 63),\n+ org.elasticsearch.indices.AliasFilterParsingException::new, 63, UNKNOWN_VERSION_ADDED),\n // 64 was DeleteByQueryFailedEngineException, which was removed in 5.0\n- GATEWAY_EXCEPTION(org.elasticsearch.gateway.GatewayException.class, org.elasticsearch.gateway.GatewayException::new, 65),\n+ GATEWAY_EXCEPTION(org.elasticsearch.gateway.GatewayException.class, org.elasticsearch.gateway.GatewayException::new, 65,\n+ UNKNOWN_VERSION_ADDED),\n INDEX_SHARD_NOT_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotRecoveringException.class,\n- org.elasticsearch.index.shard.IndexShardNotRecoveringException::new, 66),\n- HTTP_EXCEPTION(org.elasticsearch.http.HttpException.class, org.elasticsearch.http.HttpException::new, 67),\n+ org.elasticsearch.index.shard.IndexShardNotRecoveringException::new, 66, UNKNOWN_VERSION_ADDED),\n+ HTTP_EXCEPTION(org.elasticsearch.http.HttpException.class, org.elasticsearch.http.HttpException::new, 67, UNKNOWN_VERSION_ADDED),\n ELASTICSEARCH_EXCEPTION(org.elasticsearch.ElasticsearchException.class,\n- org.elasticsearch.ElasticsearchException::new, 68),\n+ org.elasticsearch.ElasticsearchException::new, 68, UNKNOWN_VERSION_ADDED),\n SNAPSHOT_MISSING_EXCEPTION(org.elasticsearch.snapshots.SnapshotMissingException.class,\n- org.elasticsearch.snapshots.SnapshotMissingException::new, 69),\n+ org.elasticsearch.snapshots.SnapshotMissingException::new, 69, UNKNOWN_VERSION_ADDED),\n PRIMARY_MISSING_ACTION_EXCEPTION(org.elasticsearch.action.PrimaryMissingActionException.class,\n- org.elasticsearch.action.PrimaryMissingActionException::new, 70),\n- FAILED_NODE_EXCEPTION(org.elasticsearch.action.FailedNodeException.class, org.elasticsearch.action.FailedNodeException::new, 71),\n- SEARCH_PARSE_EXCEPTION(org.elasticsearch.search.SearchParseException.class, org.elasticsearch.search.SearchParseException::new, 72),\n+ org.elasticsearch.action.PrimaryMissingActionException::new, 70, UNKNOWN_VERSION_ADDED),\n+ FAILED_NODE_EXCEPTION(org.elasticsearch.action.FailedNodeException.class, org.elasticsearch.action.FailedNodeException::new, 71,\n+ UNKNOWN_VERSION_ADDED),\n+ SEARCH_PARSE_EXCEPTION(org.elasticsearch.search.SearchParseException.class, org.elasticsearch.search.SearchParseException::new, 72,\n+ UNKNOWN_VERSION_ADDED),\n CONCURRENT_SNAPSHOT_EXECUTION_EXCEPTION(org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException.class,\n- org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException::new, 73),\n+ org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException::new, 73, UNKNOWN_VERSION_ADDED),\n BLOB_STORE_EXCEPTION(org.elasticsearch.common.blobstore.BlobStoreException.class,\n- org.elasticsearch.common.blobstore.BlobStoreException::new, 74),\n+ org.elasticsearch.common.blobstore.BlobStoreException::new, 74, UNKNOWN_VERSION_ADDED),\n INCOMPATIBLE_CLUSTER_STATE_VERSION_EXCEPTION(org.elasticsearch.cluster.IncompatibleClusterStateVersionException.class,\n- org.elasticsearch.cluster.IncompatibleClusterStateVersionException::new, 75),\n+ org.elasticsearch.cluster.IncompatibleClusterStateVersionException::new, 75, UNKNOWN_VERSION_ADDED),\n RECOVERY_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RecoveryEngineException.class,\n- org.elasticsearch.index.engine.RecoveryEngineException::new, 76),\n+ org.elasticsearch.index.engine.RecoveryEngineException::new, 76, UNKNOWN_VERSION_ADDED),\n 
UNCATEGORIZED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.UncategorizedExecutionException.class,\n- org.elasticsearch.common.util.concurrent.UncategorizedExecutionException::new, 77),\n+ org.elasticsearch.common.util.concurrent.UncategorizedExecutionException::new, 77, UNKNOWN_VERSION_ADDED),\n TIMESTAMP_PARSING_EXCEPTION(org.elasticsearch.action.TimestampParsingException.class,\n- org.elasticsearch.action.TimestampParsingException::new, 78),\n+ org.elasticsearch.action.TimestampParsingException::new, 78, UNKNOWN_VERSION_ADDED),\n ROUTING_MISSING_EXCEPTION(org.elasticsearch.action.RoutingMissingException.class,\n- org.elasticsearch.action.RoutingMissingException::new, 79),\n- INDEX_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.IndexFailedEngineException.class,\n- org.elasticsearch.index.engine.IndexFailedEngineException::new, 80), // deprecated in 6.0, remove in 7.0\n+ org.elasticsearch.action.RoutingMissingException::new, 79, UNKNOWN_VERSION_ADDED),\n+ INDEX_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.IndexFailedEngineException.class, // deprecated in 6.0, remove in 7.0\n+ org.elasticsearch.index.engine.IndexFailedEngineException::new, 80, UNKNOWN_VERSION_ADDED),\n INDEX_SHARD_RESTORE_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException.class,\n- org.elasticsearch.index.snapshots.IndexShardRestoreFailedException::new, 81),\n+ org.elasticsearch.index.snapshots.IndexShardRestoreFailedException::new, 81, UNKNOWN_VERSION_ADDED),\n REPOSITORY_EXCEPTION(org.elasticsearch.repositories.RepositoryException.class,\n- org.elasticsearch.repositories.RepositoryException::new, 82),\n+ org.elasticsearch.repositories.RepositoryException::new, 82, UNKNOWN_VERSION_ADDED),\n RECEIVE_TIMEOUT_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ReceiveTimeoutTransportException.class,\n- org.elasticsearch.transport.ReceiveTimeoutTransportException::new, 83),\n+ org.elasticsearch.transport.ReceiveTimeoutTransportException::new, 83, UNKNOWN_VERSION_ADDED),\n NODE_DISCONNECTED_EXCEPTION(org.elasticsearch.transport.NodeDisconnectedException.class,\n- org.elasticsearch.transport.NodeDisconnectedException::new, 84),\n+ org.elasticsearch.transport.NodeDisconnectedException::new, 84, UNKNOWN_VERSION_ADDED),\n ALREADY_EXPIRED_EXCEPTION(org.elasticsearch.index.AlreadyExpiredException.class,\n- org.elasticsearch.index.AlreadyExpiredException::new, 85),\n+ org.elasticsearch.index.AlreadyExpiredException::new, 85, UNKNOWN_VERSION_ADDED),\n AGGREGATION_EXECUTION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationExecutionException.class,\n- org.elasticsearch.search.aggregations.AggregationExecutionException::new, 86),\n+ org.elasticsearch.search.aggregations.AggregationExecutionException::new, 86, UNKNOWN_VERSION_ADDED),\n // 87 used to be for MergeMappingException\n INVALID_INDEX_TEMPLATE_EXCEPTION(org.elasticsearch.indices.InvalidIndexTemplateException.class,\n- org.elasticsearch.indices.InvalidIndexTemplateException::new, 88),\n+ org.elasticsearch.indices.InvalidIndexTemplateException::new, 88, UNKNOWN_VERSION_ADDED),\n REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class,\n- org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90),\n+ org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90, UNKNOWN_VERSION_ADDED),\n AGGREGATION_INITIALIZATION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationInitializationException.class,\n- 
org.elasticsearch.search.aggregations.AggregationInitializationException::new, 91),\n+ org.elasticsearch.search.aggregations.AggregationInitializationException::new, 91, UNKNOWN_VERSION_ADDED),\n DELAY_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.DelayRecoveryException.class,\n- org.elasticsearch.indices.recovery.DelayRecoveryException::new, 92),\n+ org.elasticsearch.indices.recovery.DelayRecoveryException::new, 92, UNKNOWN_VERSION_ADDED),\n // 93 used to be for IndexWarmerMissingException\n NO_NODE_AVAILABLE_EXCEPTION(org.elasticsearch.client.transport.NoNodeAvailableException.class,\n- org.elasticsearch.client.transport.NoNodeAvailableException::new, 94),\n+ org.elasticsearch.client.transport.NoNodeAvailableException::new, 94, UNKNOWN_VERSION_ADDED),\n INVALID_SNAPSHOT_NAME_EXCEPTION(org.elasticsearch.snapshots.InvalidSnapshotNameException.class,\n- org.elasticsearch.snapshots.InvalidSnapshotNameException::new, 96),\n+ org.elasticsearch.snapshots.InvalidSnapshotNameException::new, 96, UNKNOWN_VERSION_ADDED),\n ILLEGAL_INDEX_SHARD_STATE_EXCEPTION(org.elasticsearch.index.shard.IllegalIndexShardStateException.class,\n- org.elasticsearch.index.shard.IllegalIndexShardStateException::new, 97),\n+ org.elasticsearch.index.shard.IllegalIndexShardStateException::new, 97, UNKNOWN_VERSION_ADDED),\n INDEX_SHARD_SNAPSHOT_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotException.class,\n- org.elasticsearch.index.snapshots.IndexShardSnapshotException::new, 98),\n+ org.elasticsearch.index.snapshots.IndexShardSnapshotException::new, 98, UNKNOWN_VERSION_ADDED),\n INDEX_SHARD_NOT_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotStartedException.class,\n- org.elasticsearch.index.shard.IndexShardNotStartedException::new, 99),\n+ org.elasticsearch.index.shard.IndexShardNotStartedException::new, 99, UNKNOWN_VERSION_ADDED),\n SEARCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.action.search.SearchPhaseExecutionException.class,\n- org.elasticsearch.action.search.SearchPhaseExecutionException::new, 100),\n+ org.elasticsearch.action.search.SearchPhaseExecutionException::new, 100, UNKNOWN_VERSION_ADDED),\n ACTION_NOT_FOUND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionNotFoundTransportException.class,\n- org.elasticsearch.transport.ActionNotFoundTransportException::new, 101),\n+ org.elasticsearch.transport.ActionNotFoundTransportException::new, 101, UNKNOWN_VERSION_ADDED),\n TRANSPORT_SERIALIZATION_EXCEPTION(org.elasticsearch.transport.TransportSerializationException.class,\n- org.elasticsearch.transport.TransportSerializationException::new, 102),\n+ org.elasticsearch.transport.TransportSerializationException::new, 102, UNKNOWN_VERSION_ADDED),\n REMOTE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.RemoteTransportException.class,\n- org.elasticsearch.transport.RemoteTransportException::new, 103),\n+ org.elasticsearch.transport.RemoteTransportException::new, 103, UNKNOWN_VERSION_ADDED),\n ENGINE_CREATION_FAILURE_EXCEPTION(org.elasticsearch.index.engine.EngineCreationFailureException.class,\n- org.elasticsearch.index.engine.EngineCreationFailureException::new, 104),\n+ org.elasticsearch.index.engine.EngineCreationFailureException::new, 104, UNKNOWN_VERSION_ADDED),\n ROUTING_EXCEPTION(org.elasticsearch.cluster.routing.RoutingException.class,\n- org.elasticsearch.cluster.routing.RoutingException::new, 105),\n+ org.elasticsearch.cluster.routing.RoutingException::new, 105, UNKNOWN_VERSION_ADDED),\n 
INDEX_SHARD_RECOVERY_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveryException.class,\n- org.elasticsearch.index.shard.IndexShardRecoveryException::new, 106),\n+ org.elasticsearch.index.shard.IndexShardRecoveryException::new, 106, UNKNOWN_VERSION_ADDED),\n REPOSITORY_MISSING_EXCEPTION(org.elasticsearch.repositories.RepositoryMissingException.class,\n- org.elasticsearch.repositories.RepositoryMissingException::new, 107),\n+ org.elasticsearch.repositories.RepositoryMissingException::new, 107, UNKNOWN_VERSION_ADDED),\n DOCUMENT_SOURCE_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentSourceMissingException.class,\n- org.elasticsearch.index.engine.DocumentSourceMissingException::new, 109),\n+ org.elasticsearch.index.engine.DocumentSourceMissingException::new, 109, UNKNOWN_VERSION_ADDED),\n // 110 used to be FlushNotAllowedEngineException\n NO_CLASS_SETTINGS_EXCEPTION(org.elasticsearch.common.settings.NoClassSettingsException.class,\n- org.elasticsearch.common.settings.NoClassSettingsException::new, 111),\n+ org.elasticsearch.common.settings.NoClassSettingsException::new, 111, UNKNOWN_VERSION_ADDED),\n BIND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.BindTransportException.class,\n- org.elasticsearch.transport.BindTransportException::new, 112),\n+ org.elasticsearch.transport.BindTransportException::new, 112, UNKNOWN_VERSION_ADDED),\n ALIASES_NOT_FOUND_EXCEPTION(org.elasticsearch.rest.action.admin.indices.AliasesNotFoundException.class,\n- org.elasticsearch.rest.action.admin.indices.AliasesNotFoundException::new, 113),\n+ org.elasticsearch.rest.action.admin.indices.AliasesNotFoundException::new, 113, UNKNOWN_VERSION_ADDED),\n INDEX_SHARD_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveringException.class,\n- org.elasticsearch.index.shard.IndexShardRecoveringException::new, 114),\n+ org.elasticsearch.index.shard.IndexShardRecoveringException::new, 114, UNKNOWN_VERSION_ADDED),\n TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TranslogException.class,\n- org.elasticsearch.index.translog.TranslogException::new, 115),\n+ org.elasticsearch.index.translog.TranslogException::new, 115, UNKNOWN_VERSION_ADDED),\n PROCESS_CLUSTER_EVENT_TIMEOUT_EXCEPTION(org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException.class,\n- org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException::new, 116),\n+ org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException::new, 116, UNKNOWN_VERSION_ADDED),\n RETRY_ON_PRIMARY_EXCEPTION(ReplicationOperation.RetryOnPrimaryException.class,\n- ReplicationOperation.RetryOnPrimaryException::new, 117),\n+ ReplicationOperation.RetryOnPrimaryException::new, 117, UNKNOWN_VERSION_ADDED),\n ELASTICSEARCH_TIMEOUT_EXCEPTION(org.elasticsearch.ElasticsearchTimeoutException.class,\n- org.elasticsearch.ElasticsearchTimeoutException::new, 118),\n+ org.elasticsearch.ElasticsearchTimeoutException::new, 118, UNKNOWN_VERSION_ADDED),\n QUERY_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.query.QueryPhaseExecutionException.class,\n- org.elasticsearch.search.query.QueryPhaseExecutionException::new, 119),\n+ org.elasticsearch.search.query.QueryPhaseExecutionException::new, 119, UNKNOWN_VERSION_ADDED),\n REPOSITORY_VERIFICATION_EXCEPTION(org.elasticsearch.repositories.RepositoryVerificationException.class,\n- org.elasticsearch.repositories.RepositoryVerificationException::new, 120),\n+ org.elasticsearch.repositories.RepositoryVerificationException::new, 120, UNKNOWN_VERSION_ADDED),\n 
INVALID_AGGREGATION_PATH_EXCEPTION(org.elasticsearch.search.aggregations.InvalidAggregationPathException.class,\n- org.elasticsearch.search.aggregations.InvalidAggregationPathException::new, 121),\n+ org.elasticsearch.search.aggregations.InvalidAggregationPathException::new, 121, UNKNOWN_VERSION_ADDED),\n // 123 used to be IndexAlreadyExistsException and was renamed\n RESOURCE_ALREADY_EXISTS_EXCEPTION(ResourceAlreadyExistsException.class,\n- ResourceAlreadyExistsException::new, 123),\n+ ResourceAlreadyExistsException::new, 123, UNKNOWN_VERSION_ADDED),\n // 124 used to be Script.ScriptParseException\n HTTP_ON_TRANSPORT_EXCEPTION(TcpTransport.HttpOnTransportException.class,\n- TcpTransport.HttpOnTransportException::new, 125),\n+ TcpTransport.HttpOnTransportException::new, 125, UNKNOWN_VERSION_ADDED),\n MAPPER_PARSING_EXCEPTION(org.elasticsearch.index.mapper.MapperParsingException.class,\n- org.elasticsearch.index.mapper.MapperParsingException::new, 126),\n+ org.elasticsearch.index.mapper.MapperParsingException::new, 126, UNKNOWN_VERSION_ADDED),\n SEARCH_CONTEXT_EXCEPTION(org.elasticsearch.search.SearchContextException.class,\n- org.elasticsearch.search.SearchContextException::new, 127),\n+ org.elasticsearch.search.SearchContextException::new, 127, UNKNOWN_VERSION_ADDED),\n SEARCH_SOURCE_BUILDER_EXCEPTION(org.elasticsearch.search.builder.SearchSourceBuilderException.class,\n- org.elasticsearch.search.builder.SearchSourceBuilderException::new, 128),\n+ org.elasticsearch.search.builder.SearchSourceBuilderException::new, 128, UNKNOWN_VERSION_ADDED),\n ENGINE_CLOSED_EXCEPTION(org.elasticsearch.index.engine.EngineClosedException.class,\n- org.elasticsearch.index.engine.EngineClosedException::new, 129),\n+ org.elasticsearch.index.engine.EngineClosedException::new, 129, UNKNOWN_VERSION_ADDED),\n NO_SHARD_AVAILABLE_ACTION_EXCEPTION(org.elasticsearch.action.NoShardAvailableActionException.class,\n- org.elasticsearch.action.NoShardAvailableActionException::new, 130),\n+ org.elasticsearch.action.NoShardAvailableActionException::new, 130, UNKNOWN_VERSION_ADDED),\n UNAVAILABLE_SHARDS_EXCEPTION(org.elasticsearch.action.UnavailableShardsException.class,\n- org.elasticsearch.action.UnavailableShardsException::new, 131),\n+ org.elasticsearch.action.UnavailableShardsException::new, 131, UNKNOWN_VERSION_ADDED),\n FLUSH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushFailedEngineException.class,\n- org.elasticsearch.index.engine.FlushFailedEngineException::new, 132),\n+ org.elasticsearch.index.engine.FlushFailedEngineException::new, 132, UNKNOWN_VERSION_ADDED),\n CIRCUIT_BREAKING_EXCEPTION(org.elasticsearch.common.breaker.CircuitBreakingException.class,\n- org.elasticsearch.common.breaker.CircuitBreakingException::new, 133),\n+ org.elasticsearch.common.breaker.CircuitBreakingException::new, 133, UNKNOWN_VERSION_ADDED),\n NODE_NOT_CONNECTED_EXCEPTION(org.elasticsearch.transport.NodeNotConnectedException.class,\n- org.elasticsearch.transport.NodeNotConnectedException::new, 134),\n+ org.elasticsearch.transport.NodeNotConnectedException::new, 134, UNKNOWN_VERSION_ADDED),\n STRICT_DYNAMIC_MAPPING_EXCEPTION(org.elasticsearch.index.mapper.StrictDynamicMappingException.class,\n- org.elasticsearch.index.mapper.StrictDynamicMappingException::new, 135),\n+ org.elasticsearch.index.mapper.StrictDynamicMappingException::new, 135, UNKNOWN_VERSION_ADDED),\n RETRY_ON_REPLICA_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException.class,\n- 
org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException::new, 136),\n+ org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException::new, 136,\n+ UNKNOWN_VERSION_ADDED),\n TYPE_MISSING_EXCEPTION(org.elasticsearch.indices.TypeMissingException.class,\n- org.elasticsearch.indices.TypeMissingException::new, 137),\n+ org.elasticsearch.indices.TypeMissingException::new, 137, UNKNOWN_VERSION_ADDED),\n FAILED_TO_COMMIT_CLUSTER_STATE_EXCEPTION(org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException.class,\n- org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException::new, 140),\n+ org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException::new, 140, UNKNOWN_VERSION_ADDED),\n QUERY_SHARD_EXCEPTION(org.elasticsearch.index.query.QueryShardException.class,\n- org.elasticsearch.index.query.QueryShardException::new, 141),\n+ org.elasticsearch.index.query.QueryShardException::new, 141, UNKNOWN_VERSION_ADDED),\n NO_LONGER_PRIMARY_SHARD_EXCEPTION(ShardStateAction.NoLongerPrimaryShardException.class,\n- ShardStateAction.NoLongerPrimaryShardException::new, 142),\n- SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 143),\n- NOT_MASTER_EXCEPTION(org.elasticsearch.cluster.NotMasterException.class, org.elasticsearch.cluster.NotMasterException::new, 144),\n- STATUS_EXCEPTION(org.elasticsearch.ElasticsearchStatusException.class, org.elasticsearch.ElasticsearchStatusException::new, 145),\n+ ShardStateAction.NoLongerPrimaryShardException::new, 142, UNKNOWN_VERSION_ADDED),\n+ SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 143,\n+ UNKNOWN_VERSION_ADDED),\n+ NOT_MASTER_EXCEPTION(org.elasticsearch.cluster.NotMasterException.class, org.elasticsearch.cluster.NotMasterException::new, 144,\n+ UNKNOWN_VERSION_ADDED),\n+ STATUS_EXCEPTION(org.elasticsearch.ElasticsearchStatusException.class, org.elasticsearch.ElasticsearchStatusException::new, 145,\n+ UNKNOWN_VERSION_ADDED),\n TASK_CANCELLED_EXCEPTION(org.elasticsearch.tasks.TaskCancelledException.class,\n- org.elasticsearch.tasks.TaskCancelledException::new, 146);\n+ org.elasticsearch.tasks.TaskCancelledException::new, 146, UNKNOWN_VERSION_ADDED),\n+ SHARD_LOCK_OBTAIN_FAILED_EXCEPTION(org.elasticsearch.env.ShardLockObtainFailedException.class,\n+ org.elasticsearch.env.ShardLockObtainFailedException::new, 147, V_5_1_0_UNRELEASED);\n \n \n final Class<? extends ElasticsearchException> exceptionClass;\n final FunctionThatThrowsIOException<StreamInput, ? 
extends ElasticsearchException> constructor;\n final int id;\n+ final Version versionAdded;\n \n <E extends ElasticsearchException> ElasticsearchExceptionHandle(Class<E> exceptionClass,\n- FunctionThatThrowsIOException<StreamInput, E> constructor, int id) {\n+ FunctionThatThrowsIOException<StreamInput, E> constructor, int id,\n+ Version versionAdded) {\n // We need the exceptionClass because you can't dig it out of the constructor reliably.\n this.exceptionClass = exceptionClass;\n this.constructor = constructor;\n+ this.versionAdded = versionAdded;\n this.id = id;\n }\n }", "filename": "core/src/main/java/org/elasticsearch/ElasticsearchException.java", "status": "modified" }, { "diff": "@@ -783,7 +783,7 @@ public void writeException(Throwable throwable) throws IOException {\n writeVInt(17);\n } else {\n ElasticsearchException ex;\n- if (throwable instanceof ElasticsearchException && ElasticsearchException.isRegistered(throwable.getClass())) {\n+ if (throwable instanceof ElasticsearchException && ElasticsearchException.isRegistered(throwable.getClass(), version)) {\n ex = (ElasticsearchException) throwable;\n } else {\n ex = new NotSerializableExceptionWrapper(throwable);", "filename": "core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java", "status": "modified" }, { "diff": "@@ -19,30 +19,36 @@\n \n package org.elasticsearch.env;\n \n+import org.elasticsearch.ElasticsearchException;\n+import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.index.shard.ShardId;\n \n+import java.io.IOException;\n+\n /**\n * Exception used when the in-memory lock for a shard cannot be obtained\n */\n-public class ShardLockObtainFailedException extends Exception {\n- private final ShardId shardId;\n+public class ShardLockObtainFailedException extends ElasticsearchException {\n \n public ShardLockObtainFailedException(ShardId shardId, String message) {\n- super(message);\n- this.shardId = shardId;\n+ super(buildMessage(shardId, message));\n+ this.setShard(shardId);\n }\n \n public ShardLockObtainFailedException(ShardId shardId, String message, Throwable cause) {\n- super(message, cause);\n- this.shardId = shardId;\n+ super(buildMessage(shardId, message), cause);\n+ this.setShard(shardId);\n+ }\n+\n+ public ShardLockObtainFailedException(StreamInput in) throws IOException {\n+ super(in);\n }\n \n- @Override\n- public String getMessage() {\n+ private static String buildMessage(ShardId shardId, String message) {\n StringBuilder sb = new StringBuilder();\n sb.append(shardId.toString());\n sb.append(\": \");\n- sb.append(super.getMessage());\n+ sb.append(message);\n return sb.toString();\n }\n }", "filename": "core/src/main/java/org/elasticsearch/env/ShardLockObtainFailedException.java", "status": "modified" }, { "diff": "@@ -49,6 +49,7 @@\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.common.xcontent.XContentLocation;\n import org.elasticsearch.discovery.DiscoverySettings;\n+import org.elasticsearch.env.ShardLockObtainFailedException;\n import org.elasticsearch.index.AlreadyExpiredException;\n import org.elasticsearch.index.Index;\n import org.elasticsearch.index.engine.RecoveryEngineException;\n@@ -107,6 +108,7 @@\n import static java.util.Collections.emptySet;\n import static java.util.Collections.singleton;\n import static org.hamcrest.Matchers.greaterThanOrEqualTo;\n+import static org.hamcrest.Matchers.instanceOf;\n \n public class ExceptionSerializationTests extends ESTestCase {\n \n@@ -160,10 +162,10 @@ private void 
checkClass(Class<?> clazz) {\n if (isEsException(clazz) == false) {\n return;\n }\n- if (ElasticsearchException.isRegistered(clazz.asSubclass(Throwable.class)) == false\n+ if (ElasticsearchException.isRegistered(clazz.asSubclass(Throwable.class), Version.CURRENT) == false\n && ElasticsearchException.class.equals(clazz.getEnclosingClass()) == false) {\n notRegistered.add(clazz);\n- } else if (ElasticsearchException.isRegistered(clazz.asSubclass(Throwable.class))) {\n+ } else if (ElasticsearchException.isRegistered(clazz.asSubclass(Throwable.class), Version.CURRENT)) {\n registered.add(clazz);\n try {\n if (clazz.getMethod(\"writeTo\", StreamOutput.class) != null) {\n@@ -218,10 +220,17 @@ public TestException(StreamInput in) throws IOException {\n }\n \n private <T extends Exception> T serialize(T exception) throws IOException {\n- ElasticsearchAssertions.assertVersionSerializable(VersionUtils.randomVersion(random()), exception);\n+ return serialize(exception, VersionUtils.randomVersion(random()));\n+ }\n+\n+ private <T extends Exception> T serialize(T exception, Version version) throws IOException {\n+ ElasticsearchAssertions.assertVersionSerializable(version, exception);\n BytesStreamOutput out = new BytesStreamOutput();\n+ out.setVersion(version);\n out.writeException(exception);\n+\n StreamInput in = out.bytes().streamInput();\n+ in.setVersion(version);\n return in.readException();\n }\n \n@@ -769,6 +778,7 @@ public void testIds() {\n ids.put(144, org.elasticsearch.cluster.NotMasterException.class);\n ids.put(145, org.elasticsearch.ElasticsearchStatusException.class);\n ids.put(146, org.elasticsearch.tasks.TaskCancelledException.class);\n+ ids.put(147, org.elasticsearch.env.ShardLockObtainFailedException.class);\n \n Map<Class<? extends ElasticsearchException>, Integer> reverse = new HashMap<>();\n for (Map.Entry<Integer, Class<? 
extends ElasticsearchException>> entry : ids.entrySet()) {\n@@ -826,4 +836,28 @@ public void testElasticsearchRemoteException() throws IOException {\n assertEquals(ex.status(), e.status());\n assertEquals(RestStatus.TOO_MANY_REQUESTS, e.status());\n }\n+\n+ public void testShardLockObtainFailedException() throws IOException {\n+ ShardId shardId = new ShardId(\"foo\", \"_na_\", 1);\n+ ShardLockObtainFailedException orig = new ShardLockObtainFailedException(shardId, \"boom\");\n+ Version version = VersionUtils.randomVersionBetween(random(),\n+ Version.V_5_0_0, Version.CURRENT);\n+ if (version.before(ElasticsearchException.V_5_1_0_UNRELEASED)) {\n+ // remove this once 5_1_0 is released randomVersionBetween asserts that this version is in the constant table..\n+ version = ElasticsearchException.V_5_1_0_UNRELEASED;\n+ }\n+ ShardLockObtainFailedException ex = serialize(orig, version);\n+ assertEquals(orig.getMessage(), ex.getMessage());\n+ assertEquals(orig.getShardId(), ex.getShardId());\n+ }\n+\n+ public void testBWCShardLockObtainFailedException() throws IOException {\n+ ShardId shardId = new ShardId(\"foo\", \"_na_\", 1);\n+ ShardLockObtainFailedException orig = new ShardLockObtainFailedException(shardId, \"boom\");\n+ Exception ex = serialize((Exception)orig, Version.V_5_0_0);\n+ assertThat(ex, instanceOf(NotSerializableExceptionWrapper.class));\n+ assertEquals(\"shard_lock_obtain_failed_exception: [foo][1]: boom\", ex.getMessage());\n+ }\n+\n+\n }", "filename": "core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java", "status": "modified" }, { "diff": "@@ -284,6 +284,7 @@ public void testUnknownVersions() {\n assertUnknownVersion(OsStats.V_5_1_0); // once we released 5.1.0 and it's added to Version.java we need to remove this constant\n assertUnknownVersion(QueryStringQueryBuilder.V_5_1_0_UNRELEASED);\n assertUnknownVersion(SimpleQueryStringBuilder.V_5_1_0_UNRELEASED);\n+ assertUnknownVersion(ElasticsearchException.V_5_1_0_UNRELEASED);\n // once we released 5.0.0 and it's added to Version.java we need to remove this constant\n assertUnknownVersion(Script.V_5_1_0_UNRELEASED);\n // once we released 5.0.0 and it's added to Version.java we need to remove this constant", "filename": "core/src/test/java/org/elasticsearch/VersionTests.java", "status": "modified" } ] }
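To make the version-gating in the diff above easier to follow, here is a minimal, self-contained sketch of the idea in plain Java. The class and method names are hypothetical stand-ins, not the real Elasticsearch types: each exception wire id carries the version that introduced it, and a sender only uses the id when the target node is new enough.

```java
import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in types; the real logic lives in ElasticsearchException's
// handle table and StreamOutput#writeException.
final class ExceptionRegistrySketch {

    /** Simplified wire version, comparable by a single integer id. */
    static final class WireVersion {
        final int id;
        WireVersion(int id) { this.id = id; }
        boolean onOrAfter(WireVersion other) { return id >= other.id; }
    }

    /** Wire id of an exception plus the version in which that id was added. */
    static final class Handle {
        final int wireId;
        final WireVersion versionAdded;
        Handle(int wireId, WireVersion versionAdded) {
            this.wireId = wireId;
            this.versionAdded = versionAdded;
        }
    }

    private final Map<Class<?>, Handle> handles = new HashMap<>();

    void register(Class<?> exceptionClass, int wireId, WireVersion versionAdded) {
        handles.put(exceptionClass, new Handle(wireId, versionAdded));
    }

    /**
     * An exception only counts as registered for a given target node when that node's
     * version already knows the wire id; otherwise the writer falls back to a generic
     * wrapper (NotSerializableExceptionWrapper in Elasticsearch).
     */
    boolean isRegistered(Class<?> exceptionClass, WireVersion targetNodeVersion) {
        Handle handle = handles.get(exceptionClass);
        return handle != null && targetNodeVersion.onOrAfter(handle.versionAdded);
    }
}
```

This mirrors the BWC test in the diff above: serializing the new `ShardLockObtainFailedException` to a 5.0.0 stream yields the `NotSerializableExceptionWrapper`, while a 5.1.0 stream round-trips the concrete exception.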
{ "body": "<!--\r\nGitHub is reserved for bug reports and feature requests. The best place\r\nto ask a general question is at the Elastic Discourse forums at\r\nhttps://discuss.elastic.co. If you are in fact posting a bug report or\r\na feature request, please include one and only one of the below blocks\r\nin your new issue. Note that whether you're filing a bug report or a\r\nfeature request, ensure that your submission is for an\r\n[OS that we support](https://www.elastic.co/support/matrix#show_os).\r\nBug reports on an OS that we do not support or feature requests\r\nspecific to an OS that we do not support will be closed.\r\n-->\r\n\r\n<!--\r\nIf you are filing a bug report, please remove the below feature\r\nrequest block and provide responses for all of the below items.\r\n-->\r\n\r\n**Elasticsearch version**: 5.0.0, 5.0.1\r\n\r\n**Plugins installed**: [Kibana]\r\n\r\n**JVM version**: 1.8.0_92\r\n\r\n**OS version**: Elastic.co Cloud (5.0.0) / OS X El Capitan (5.0.1)\r\n\r\n**Description of the problem including expected versus actual behavior**:\r\n\r\nRequesting `nested` query on multiple indices with `inner_hits` and `ignore_unmapped: true` causes a NullPointerException on indices which do not have the desired `nested` property. \r\n\r\nIt shows correct `hits.total` while not returning all the matched document(s) in `hits.hits`.\r\n\r\n**Steps to reproduce**:\r\n\r\nI've successfully reproduced the behavior using two indices while only one of them having the nested property.\r\n\r\n```sh\r\ncurl -XDELETE localhost:9200/people\r\ncurl -XPUT -d '{\r\n \"mappings\": {\r\n \"employee\": {\r\n \"properties\": {\r\n \"companyId\": { \"type\": \"string\" }\r\n }\r\n }\r\n }\r\n}' localhost:9200/people\r\n\r\ncurl -XDELETE localhost:9200/things\r\ncurl -XPUT -d '{\r\n \"mappings\": {\r\n \"product\": {\r\n \"properties\": {\r\n \"companyId\": { \"type\": \"string\" },\r\n \"parts\": { \"type\": \"nested\", \"properties\": { \"companyId\": { \"type\": \"string\" } } }\r\n }\r\n }\r\n }\r\n}' localhost:9200/things\r\n\r\ncurl -XPUT -d '{ \"companyId\": \"foo\" }' localhost:9200/people/employee/1\r\ncurl -XPUT -d '{ \"companyId\": \"foo\", \"parts\": [{ \"companyId\": \"foo\" }] }' localhost:9200/things/product/1\r\n\r\nsleep 1\r\n\r\n# Fails\r\ncurl -d '{\r\n \"query\": {\r\n \"bool\": {\r\n \"should\": [\r\n {\r\n \"nested\": {\r\n \"path\": \"parts\",\r\n \"query\": { \"term\": { \"parts.companyId\": \"foo\" } },\r\n \"ignore_unmapped\": true,\r\n \"inner_hits\": {}\r\n }\r\n },\r\n {\r\n \"term\": {\r\n \"companyId\": \"foo\"\r\n }\r\n }\r\n ],\r\n \"minimum_should_match\": 1\r\n }\r\n }\r\n}' localhost:9200/people,things/_search\r\n\r\n# Succeeds\r\ncurl -d '{\r\n \"query\": {\r\n \"bool\": {\r\n \"should\": [\r\n {\r\n \"nested\": {\r\n \"path\": \"parts\",\r\n \"query\": { \"term\": { \"parts.companyId\": \"foo\" } },\r\n \"ignore_unmapped\": true\r\n }\r\n },\r\n {\r\n \"term\": {\r\n \"companyId\": \"foo\"\r\n }\r\n }\r\n ],\r\n \"minimum_should_match\": 1\r\n }\r\n }\r\n}' localhost:9200/people,things/_search\r\n```\r\n\r\nThe failing request responds with the body:\r\n\r\n```json\r\n{\r\n \"took\": 31,\r\n \"timed_out\": false,\r\n \"_shards\": {\r\n \"total\": 10,\r\n \"successful\": 9,\r\n \"failed\": 1,\r\n \"failures\": [\r\n {\r\n \"shard\": 3,\r\n \"index\": \"people\",\r\n \"node\": \"LuRj8eynRU-s1Wfvn9VELw\",\r\n \"reason\": {\r\n \"type\": \"null_pointer_exception\",\r\n \"reason\": null\r\n }\r\n }\r\n ]\r\n },\r\n \"hits\": {\r\n \"total\": 2,\r\n \"max_score\": 
0.5753642,\r\n \"hits\": [\r\n {\r\n \"_index\": \"things\",\r\n \"_type\": \"product\",\r\n \"_id\": \"1\",\r\n \"_score\": 0.5753642,\r\n \"_source\": {\r\n \"companyId\": \"foo\",\r\n \"parts\": [\r\n {\r\n \"companyId\": \"foo\"\r\n }\r\n ]\r\n },\r\n \"inner_hits\": {\r\n \"parts\": {\r\n \"hits\": {\r\n \"total\": 1,\r\n \"max_score\": 0.2876821,\r\n \"hits\": [\r\n {\r\n \"_nested\": {\r\n \"field\": \"parts\",\r\n \"offset\": 0\r\n },\r\n \"_score\": 0.2876821,\r\n \"_source\": {\r\n \"companyId\": \"foo\"\r\n }\r\n }\r\n ]\r\n }\r\n }\r\n }\r\n }\r\n ]\r\n }\r\n}\r\n```\r\n\r\n**Provide logs (if relevant)**:\r\n\r\n```\r\n[2016-11-17T20:40:33,561][DEBUG][o.e.a.s.TransportSearchAction] [LuRj8ey] [12] Failed to execute fetch phase\r\norg.elasticsearch.transport.RemoteTransportException: [LuRj8ey][127.0.0.1:9300][indices:data/read/search[phase/fetch/id]]\r\nCaused by: java.lang.NullPointerException\r\n at org.elasticsearch.search.fetch.subphase.InnerHitsContext$NestedInnerHits.topDocs(InnerHitsContext.java:135) ~[elasticsearch-5.0.1.jar:5.0.1]\r\n at org.elasticsearch.search.fetch.subphase.InnerHitsFetchSubPhase.hitExecute(InnerHitsFetchSubPhase.java:55) ~[elasticsearch-5.0.1.jar:5.0.1]\r\n at org.elasticsearch.search.fetch.FetchPhase.execute(FetchPhase.java:161) ~[elasticsearch-5.0.1.jar:5.0.1]\r\n at org.elasticsearch.search.SearchService.executeFetchPhase(SearchService.java:474) ~[elasticsearch-5.0.1.jar:5.0.1]\r\n at org.elasticsearch.action.search.SearchTransportService.lambda$registerRequestHandler$13(SearchTransportService.java:311) ~[elasticsearch-5.0.1.jar:5.0.1]\r\n at org.elasticsearch.transport.TransportRequestHandler.messageReceived(TransportRequestHandler.java:33) ~[elasticsearch-5.0.1.jar:5.0.1]\r\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:69) ~[elasticsearch-5.0.1.jar:5.0.1]\r\n at org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:548) [elasticsearch-5.0.1.jar:5.0.1]\r\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:520) [elasticsearch-5.0.1.jar:5.0.1]\r\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-5.0.1.jar:5.0.1]\r\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_92]\r\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_92]\r\n at java.lang.Thread.run(Thread.java:745) [?:1.8.0_92]\r\n```\r\n\r\nThanks.", "comments": [ { "body": "@martijnvg please could you take a look?\n", "created_at": "2016-11-19T14:34:00Z" }, { "body": "Thanks for reporting @inbeom! The cause of the error is that inner hits is unaware of `ignore_unmapped` being set. This should be fixed.", "created_at": "2016-11-21T07:44:33Z" }, { "body": "@martijnvg I'm still seeing this in 5.1.1. Is a fix landed for a later release? 
I'm seeing it for this case though:\r\n\r\n```curl\r\n# Collections / datasets + row hits AIO\r\nGET /v8_*/collection,dataset/_search\r\n{\r\n \"query\": {\r\n \"bool\": {\r\n \"should\": [\r\n {\r\n \"simple_query_string\": {\r\n \"query\": \"google\"\r\n }\r\n }, {\r\n \"has_child\": {\r\n \"type\": \"row\",\r\n \"query\": {\r\n \"simple_query_string\": {\r\n \"query\": \"google\"\r\n }\r\n },\r\n \"ignore_unmapped\": true,\r\n \"inner_hits\": {\r\n \"size\": 1\r\n }\r\n }\r\n }\r\n ],\r\n \"minimum_should_match\": 1\r\n }\r\n },\r\n \"size\": 100\r\n}\r\n```\r\n\r\nResulting in:\r\n\r\n```json\r\n\"failures\": [\r\n {\r\n \"shard\": 0,\r\n \"index\": \"v8_collections\",\r\n \"node\": \"-vLWPdGXScSbVZWrhs7TJw\",\r\n \"reason\": {\r\n \"type\": \"null_pointer_exception\",\r\n \"reason\": null\r\n }\r\n }\r\n ]\r\n```", "created_at": "2016-12-21T16:21:50Z" }, { "body": "@yaycmyk No, unfortunately not. It will land in 5.2", "created_at": "2016-12-21T18:02:11Z" }, { "body": "Closed by https://github.com/elastic/elasticsearch/pull/21693", "created_at": "2016-12-23T13:27:59Z" } ], "number": 21620, "title": "NullPointerException caused by multiple indices search w/ nested, inner_hits" }
{ "body": "Don't inline inner hits if the query the inner hits is inlined into can't resolve mappings and ignore_unmapped has been set to true.\r\n\r\nCloses #21620", "number": 21693, "review_comments": [ { "body": "Please add javadocs.", "created_at": "2016-12-21T13:58:59Z" }, { "body": "please add javadocs", "created_at": "2016-12-21T16:38:08Z" } ], "title": "Inner hits and ignore unmapped" }
{ "commits": [ { "message": "inner hits: Don't inline inner hits if the query the inner hits is inlined into can't resolve mappings and ignore_unmapped has been set to true\n\nCloses #21620" } ], "files": [ { "diff": "@@ -145,8 +145,8 @@ public InnerHitBuilder innerHit() {\n return innerHitBuilder;\n }\n \n- public HasChildQueryBuilder innerHit(InnerHitBuilder innerHit) {\n- this.innerHitBuilder = new InnerHitBuilder(Objects.requireNonNull(innerHit), query, type);\n+ public HasChildQueryBuilder innerHit(InnerHitBuilder innerHit, boolean ignoreUnmapped) {\n+ this.innerHitBuilder = new InnerHitBuilder(Objects.requireNonNull(innerHit), query, type, ignoreUnmapped);\n return this;\n }\n \n@@ -268,13 +268,13 @@ public static HasChildQueryBuilder fromXContent(QueryParseContext parseContext)\n }\n }\n HasChildQueryBuilder hasChildQueryBuilder = new HasChildQueryBuilder(childType, iqb, scoreMode);\n- if (innerHitBuilder != null) {\n- hasChildQueryBuilder.innerHit(innerHitBuilder);\n- }\n hasChildQueryBuilder.minMaxChildren(minChildren, maxChildren);\n hasChildQueryBuilder.queryName(queryName);\n hasChildQueryBuilder.boost(boost);\n hasChildQueryBuilder.ignoreUnmapped(ignoreUnmapped);\n+ if (innerHitBuilder != null) {\n+ hasChildQueryBuilder.innerHit(innerHitBuilder, ignoreUnmapped);\n+ }\n return hasChildQueryBuilder;\n }\n \n@@ -318,7 +318,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException {\n context.setTypes(previousTypes);\n }\n \n- DocumentMapper childDocMapper = context.getMapperService().documentMapper(type);\n+ DocumentMapper childDocMapper = context.documentMapper(type);\n if (childDocMapper == null) {\n if (ignoreUnmapped) {\n return new MatchNoDocsQuery();", "filename": "core/src/main/java/org/elasticsearch/index/query/HasChildQueryBuilder.java", "status": "modified" }, { "diff": "@@ -124,8 +124,8 @@ public InnerHitBuilder innerHit() {\n return innerHit;\n }\n \n- public HasParentQueryBuilder innerHit(InnerHitBuilder innerHit) {\n- this.innerHit = new InnerHitBuilder(innerHit, query, type);\n+ public HasParentQueryBuilder innerHit(InnerHitBuilder innerHit, boolean ignoreUnmapped) {\n+ this.innerHit = new InnerHitBuilder(innerHit, query, type, ignoreUnmapped);\n return this;\n }\n \n@@ -159,7 +159,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException {\n context.setTypes(previousTypes);\n }\n \n- DocumentMapper parentDocMapper = context.getMapperService().documentMapper(type);\n+ DocumentMapper parentDocMapper = context.documentMapper(type);\n if (parentDocMapper == null) {\n if (ignoreUnmapped) {\n return new MatchNoDocsQuery();\n@@ -276,7 +276,7 @@ public static HasParentQueryBuilder fromXContent(QueryParseContext parseContext)\n .queryName(queryName)\n .boost(boost);\n if (innerHits != null) {\n- queryBuilder.innerHit(innerHits);\n+ queryBuilder.innerHit(innerHits, ignoreUnmapped);\n }\n return queryBuilder;\n }", "filename": "core/src/main/java/org/elasticsearch/index/query/HasParentQueryBuilder.java", "status": "modified" }, { "diff": "@@ -18,6 +18,7 @@\n */\n package org.elasticsearch.index.query;\n \n+import org.elasticsearch.Version;\n import org.elasticsearch.action.support.ToXContentToBytes;\n import org.elasticsearch.common.ParseField;\n import org.elasticsearch.common.ParsingException;\n@@ -45,7 +46,6 @@\n \n import java.io.IOException;\n import java.util.ArrayList;\n-import java.util.Collections;\n import java.util.HashMap;\n import java.util.HashSet;\n import java.util.List;\n@@ -59,13 +59,15 @@\n public final class 
InnerHitBuilder extends ToXContentToBytes implements Writeable {\n \n public static final ParseField NAME_FIELD = new ParseField(\"name\");\n+ public static final ParseField IGNORE_UNMAPPED = new ParseField(\"ignore_unmapped\");\n public static final ParseField INNER_HITS_FIELD = new ParseField(\"inner_hits\");\n public static final QueryBuilder DEFAULT_INNER_HIT_QUERY = new MatchAllQueryBuilder();\n \n private static final ObjectParser<InnerHitBuilder, QueryParseContext> PARSER = new ObjectParser<>(\"inner_hits\", InnerHitBuilder::new);\n \n static {\n PARSER.declareString(InnerHitBuilder::setName, NAME_FIELD);\n+ PARSER.declareBoolean((innerHitBuilder, value) -> innerHitBuilder.ignoreUnmapped = value, IGNORE_UNMAPPED);\n PARSER.declareInt(InnerHitBuilder::setFrom, SearchSourceBuilder.FROM_FIELD);\n PARSER.declareInt(InnerHitBuilder::setSize, SearchSourceBuilder.SIZE_FIELD);\n PARSER.declareBoolean(InnerHitBuilder::setExplain, SearchSourceBuilder.EXPLAIN_FIELD);\n@@ -130,6 +132,7 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl\n private String name;\n private String nestedPath;\n private String parentChildType;\n+ private boolean ignoreUnmapped;\n \n private int from;\n private int size = 3;\n@@ -151,6 +154,7 @@ public InnerHitBuilder() {\n \n private InnerHitBuilder(InnerHitBuilder other) {\n name = other.name;\n+ this.ignoreUnmapped = other.ignoreUnmapped;\n from = other.from;\n size = other.size;\n explain = other.explain;\n@@ -180,19 +184,21 @@ private InnerHitBuilder(InnerHitBuilder other) {\n }\n \n \n- InnerHitBuilder(InnerHitBuilder other, String nestedPath, QueryBuilder query) {\n+ InnerHitBuilder(InnerHitBuilder other, String nestedPath, QueryBuilder query, boolean ignoreUnmapped) {\n this(other);\n this.query = query;\n this.nestedPath = nestedPath;\n+ this.ignoreUnmapped = ignoreUnmapped;\n if (name == null) {\n this.name = nestedPath;\n }\n }\n \n- InnerHitBuilder(InnerHitBuilder other, QueryBuilder query, String parentChildType) {\n+ InnerHitBuilder(InnerHitBuilder other, QueryBuilder query, String parentChildType, boolean ignoreUnmapped) {\n this(other);\n this.query = query;\n this.parentChildType = parentChildType;\n+ this.ignoreUnmapped = ignoreUnmapped;\n if (name == null) {\n this.name = parentChildType;\n }\n@@ -205,6 +211,9 @@ public InnerHitBuilder(StreamInput in) throws IOException {\n name = in.readOptionalString();\n nestedPath = in.readOptionalString();\n parentChildType = in.readOptionalString();\n+ if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {\n+ ignoreUnmapped = in.readBoolean();\n+ }\n from = in.readVInt();\n size = in.readVInt();\n explain = in.readBoolean();\n@@ -243,6 +252,9 @@ public void writeTo(StreamOutput out) throws IOException {\n out.writeOptionalString(name);\n out.writeOptionalString(nestedPath);\n out.writeOptionalString(parentChildType);\n+ if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {\n+ out.writeBoolean(ignoreUnmapped);\n+ }\n out.writeVInt(from);\n out.writeVInt(size);\n out.writeBoolean(explain);\n@@ -289,6 +301,13 @@ public InnerHitBuilder setName(String name) {\n return this;\n }\n \n+ /**\n+ * Whether to include inner hits in the search response hits if required mappings is missing\n+ */\n+ public boolean isIgnoreUnmapped() {\n+ return ignoreUnmapped;\n+ }\n+\n public int getFrom() {\n return from;\n }\n@@ -523,6 +542,14 @@ public InnerHitsContext.BaseInnerHits build(SearchContext parentSearchContext,\n QueryShardContext queryShardContext = 
parentSearchContext.getQueryShardContext();\n if (nestedPath != null) {\n ObjectMapper nestedObjectMapper = queryShardContext.getObjectMapper(nestedPath);\n+ if (nestedObjectMapper == null) {\n+ if (ignoreUnmapped == false) {\n+ throw new IllegalStateException(\"[\" + query.getName() + \"] no mapping found for type [\" + nestedPath + \"]\");\n+ } else {\n+ return null;\n+ }\n+ }\n+\n ObjectMapper parentObjectMapper = queryShardContext.nestedScope().nextLevel(nestedObjectMapper);\n InnerHitsContext.NestedInnerHits nestedInnerHits = new InnerHitsContext.NestedInnerHits(\n name, parentSearchContext, parentObjectMapper, nestedObjectMapper\n@@ -535,7 +562,15 @@ public InnerHitsContext.BaseInnerHits build(SearchContext parentSearchContext,\n innerHitsContext.addInnerHitDefinition(nestedInnerHits);\n return nestedInnerHits;\n } else if (parentChildType != null) {\n- DocumentMapper documentMapper = queryShardContext.getMapperService().documentMapper(parentChildType);\n+ DocumentMapper documentMapper = queryShardContext.documentMapper(parentChildType);\n+ if (documentMapper == null) {\n+ if (ignoreUnmapped == false) {\n+ throw new IllegalStateException(\"[\" + query.getName() + \"] no mapping found for type [\" + parentChildType + \"]\");\n+ } else {\n+ return null;\n+ }\n+ }\n+\n InnerHitsContext.ParentChildInnerHits parentChildInnerHits = new InnerHitsContext.ParentChildInnerHits(\n name, parentSearchContext, queryShardContext.getMapperService(), documentMapper\n );\n@@ -556,7 +591,9 @@ private void buildChildInnerHits(SearchContext parentSearchContext, InnerHitsCon\n InnerHitsContext.BaseInnerHits childInnerHit = entry.getValue().build(\n parentSearchContext, new InnerHitsContext()\n );\n- childInnerHits.put(entry.getKey(), childInnerHit);\n+ if (childInnerHit != null) {\n+ childInnerHits.put(entry.getKey(), childInnerHit);\n+ }\n }\n innerHits.setChildInnerHits(childInnerHits);\n }\n@@ -617,6 +654,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n if (name != null) {\n builder.field(NAME_FIELD.getPreferredName(), name);\n }\n+ builder.field(IGNORE_UNMAPPED.getPreferredName(), ignoreUnmapped);\n builder.field(SearchSourceBuilder.FROM_FIELD.getPreferredName(), from);\n builder.field(SearchSourceBuilder.SIZE_FIELD.getPreferredName(), size);\n builder.field(SearchSourceBuilder.VERSION_FIELD.getPreferredName(), version);\n@@ -672,6 +710,7 @@ public boolean equals(Object o) {\n return Objects.equals(name, that.name) &&\n Objects.equals(nestedPath, that.nestedPath) &&\n Objects.equals(parentChildType, that.parentChildType) &&\n+ Objects.equals(ignoreUnmapped, that.ignoreUnmapped) &&\n Objects.equals(from, that.from) &&\n Objects.equals(size, that.size) &&\n Objects.equals(explain, that.explain) &&\n@@ -689,8 +728,8 @@ public boolean equals(Object o) {\n \n @Override\n public int hashCode() {\n- return Objects.hash(name, nestedPath, parentChildType, from, size, explain, version, trackScores, storedFieldsContext,\n- docValueFields, scriptFields, fetchSourceContext, sorts, highlightBuilder, query, childInnerHits);\n+ return Objects.hash(name, nestedPath, parentChildType, ignoreUnmapped, from, size, explain, version, trackScores,\n+ storedFieldsContext, docValueFields, scriptFields, fetchSourceContext, sorts, highlightBuilder, query, childInnerHits);\n }\n \n public static InnerHitBuilder fromXContent(QueryParseContext context) throws IOException {", "filename": "core/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java", "status": "modified" }, { "diff": 
"@@ -103,8 +103,8 @@ public InnerHitBuilder innerHit() {\n return innerHitBuilder;\n }\n \n- public NestedQueryBuilder innerHit(InnerHitBuilder innerHit) {\n- this.innerHitBuilder = new InnerHitBuilder(innerHit, path, query);\n+ public NestedQueryBuilder innerHit(InnerHitBuilder innerHit, boolean ignoreUnmapped) {\n+ this.innerHitBuilder = new InnerHitBuilder(innerHit, path, query, ignoreUnmapped);\n return this;\n }\n \n@@ -194,7 +194,7 @@ public static NestedQueryBuilder fromXContent(QueryParseContext parseContext) th\n .queryName(queryName)\n .boost(boost);\n if (innerHitBuilder != null) {\n- queryBuilder.innerHit(innerHitBuilder);\n+ queryBuilder.innerHit(innerHitBuilder, ignoreUnmapped);\n }\n return queryBuilder;\n }", "filename": "core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java", "status": "modified" }, { "diff": "@@ -41,6 +41,7 @@\n import org.elasticsearch.index.fielddata.IndexFieldData;\n import org.elasticsearch.index.fielddata.IndexFieldDataService;\n import org.elasticsearch.index.mapper.ContentPath;\n+import org.elasticsearch.index.mapper.DocumentMapper;\n import org.elasticsearch.index.mapper.MappedFieldType;\n import org.elasticsearch.index.mapper.Mapper;\n import org.elasticsearch.index.mapper.MapperService;\n@@ -207,6 +208,14 @@ public ObjectMapper getObjectMapper(String name) {\n return mapperService.getObjectMapper(name);\n }\n \n+ /**\n+ * Returns s {@link DocumentMapper} instance for the given type.\n+ * Delegates to {@link MapperService#documentMapper(String)}\n+ */\n+ public DocumentMapper documentMapper(String type) {\n+ return mapperService.documentMapper(type);\n+ }\n+\n /**\n * Gets the search analyzer for the given field, or the default if there is none present for the field\n * TODO: remove this by moving defaults into mappers themselves", "filename": "core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java", "status": "modified" }, { "diff": "@@ -104,13 +104,13 @@ protected HasChildQueryBuilder doCreateTestQueryBuilder() {\n HasChildQueryBuilder hqb = new HasChildQueryBuilder(CHILD_TYPE, innerQueryBuilder,\n RandomPicks.randomFrom(random(), ScoreMode.values()));\n hqb.minMaxChildren(min, max);\n+ hqb.ignoreUnmapped(randomBoolean());\n if (randomBoolean()) {\n hqb.innerHit(new InnerHitBuilder()\n .setName(randomAsciiOfLengthBetween(1, 10))\n .setSize(randomIntBetween(0, 100))\n- .addSort(new FieldSortBuilder(STRING_FIELD_NAME_2).order(SortOrder.ASC)));\n+ .addSort(new FieldSortBuilder(STRING_FIELD_NAME_2).order(SortOrder.ASC)), hqb.ignoreUnmapped());\n }\n- hqb.ignoreUnmapped(randomBoolean());\n return hqb;\n }\n \n@@ -189,6 +189,7 @@ public void testFromJson() throws IOException {\n \" \\\"_name\\\" : \\\"WNzYMJKRwePuRBh\\\",\\n\" +\n \" \\\"inner_hits\\\" : {\\n\" +\n \" \\\"name\\\" : \\\"inner_hits_name\\\",\\n\" +\n+ \" \\\"ignore_unmapped\\\" : false,\\n\" +\n \" \\\"from\\\" : 0,\\n\" +\n \" \\\"size\\\" : 100,\\n\" +\n \" \\\"version\\\" : false,\\n\" +\n@@ -211,7 +212,7 @@ public void testFromJson() throws IOException {\n assertEquals(query, queryBuilder.childType(), \"child\");\n assertEquals(query, queryBuilder.scoreMode(), ScoreMode.Avg);\n assertNotNull(query, queryBuilder.innerHit());\n- InnerHitBuilder expected = new InnerHitBuilder(new InnerHitBuilder(), queryBuilder.query(), \"child\")\n+ InnerHitBuilder expected = new InnerHitBuilder(new InnerHitBuilder(), queryBuilder.query(), \"child\", false)\n .setName(\"inner_hits_name\")\n .setSize(100)\n .addSort(new 
FieldSortBuilder(\"mapped_string\").order(SortOrder.ASC));", "filename": "core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -85,13 +85,13 @@ protected HasParentQueryBuilder doCreateTestQueryBuilder() {\n innerQueryBuilder = new WrapperQueryBuilder(innerQueryBuilder.toString());\n }\n HasParentQueryBuilder hqb = new HasParentQueryBuilder(PARENT_TYPE, innerQueryBuilder, randomBoolean());\n+ hqb.ignoreUnmapped(randomBoolean());\n if (randomBoolean()) {\n hqb.innerHit(new InnerHitBuilder()\n .setName(randomAsciiOfLengthBetween(1, 10))\n .setSize(randomIntBetween(0, 100))\n- .addSort(new FieldSortBuilder(STRING_FIELD_NAME_2).order(SortOrder.ASC)));\n+ .addSort(new FieldSortBuilder(STRING_FIELD_NAME_2).order(SortOrder.ASC)), hqb.ignoreUnmapped());\n }\n- hqb.ignoreUnmapped(randomBoolean());\n return hqb;\n }\n ", "filename": "core/src/test/java/org/elasticsearch/index/query/HasParentQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -34,7 +34,9 @@\n import org.elasticsearch.search.SearchModule;\n import org.elasticsearch.search.builder.SearchSourceBuilder;\n import org.elasticsearch.search.fetch.subphase.FetchSourceContext;\n+import org.elasticsearch.search.fetch.subphase.InnerHitsContext;\n import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilderTests;\n+import org.elasticsearch.search.internal.SearchContext;\n import org.elasticsearch.search.sort.SortBuilder;\n import org.elasticsearch.search.sort.SortBuilders;\n import org.elasticsearch.search.sort.SortOrder;\n@@ -57,6 +59,8 @@\n import static org.hamcrest.Matchers.not;\n import static org.hamcrest.Matchers.notNullValue;\n import static org.hamcrest.Matchers.sameInstance;\n+import static org.mockito.Mockito.mock;\n+import static org.mockito.Mockito.when;\n \n public class InnerHitBuilderTests extends ESTestCase {\n \n@@ -120,7 +124,7 @@ public void testEqualsAndHashcode() throws IOException {\n public void testInlineLeafInnerHitsNestedQuery() throws Exception {\n InnerHitBuilder leafInnerHits = randomInnerHits();\n NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder(\"path\", new MatchAllQueryBuilder(), ScoreMode.None);\n- nestedQueryBuilder.innerHit(leafInnerHits);\n+ nestedQueryBuilder.innerHit(leafInnerHits, false);\n Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();\n nestedQueryBuilder.extractInnerHitBuilders(innerHitBuilders);\n assertThat(innerHitBuilders.get(leafInnerHits.getName()), notNullValue());\n@@ -129,7 +133,7 @@ public void testInlineLeafInnerHitsNestedQuery() throws Exception {\n public void testInlineLeafInnerHitsHasChildQuery() throws Exception {\n InnerHitBuilder leafInnerHits = randomInnerHits();\n HasChildQueryBuilder hasChildQueryBuilder = new HasChildQueryBuilder(\"type\", new MatchAllQueryBuilder(), ScoreMode.None)\n- .innerHit(leafInnerHits);\n+ .innerHit(leafInnerHits, false);\n Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();\n hasChildQueryBuilder.extractInnerHitBuilders(innerHitBuilders);\n assertThat(innerHitBuilders.get(leafInnerHits.getName()), notNullValue());\n@@ -138,7 +142,7 @@ public void testInlineLeafInnerHitsHasChildQuery() throws Exception {\n public void testInlineLeafInnerHitsHasParentQuery() throws Exception {\n InnerHitBuilder leafInnerHits = randomInnerHits();\n HasParentQueryBuilder hasParentQueryBuilder = new HasParentQueryBuilder(\"type\", new MatchAllQueryBuilder(), false)\n- .innerHit(leafInnerHits);\n+ .innerHit(leafInnerHits, false);\n Map<String, 
InnerHitBuilder> innerHitBuilders = new HashMap<>();\n hasParentQueryBuilder.extractInnerHitBuilders(innerHitBuilders);\n assertThat(innerHitBuilders.get(leafInnerHits.getName()), notNullValue());\n@@ -147,7 +151,7 @@ public void testInlineLeafInnerHitsHasParentQuery() throws Exception {\n public void testInlineLeafInnerHitsNestedQueryViaBoolQuery() {\n InnerHitBuilder leafInnerHits = randomInnerHits();\n NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder(\"path\", new MatchAllQueryBuilder(), ScoreMode.None)\n- .innerHit(leafInnerHits);\n+ .innerHit(leafInnerHits, false);\n BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder().should(nestedQueryBuilder);\n Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();\n boolQueryBuilder.extractInnerHitBuilders(innerHitBuilders);\n@@ -157,7 +161,7 @@ public void testInlineLeafInnerHitsNestedQueryViaBoolQuery() {\n public void testInlineLeafInnerHitsNestedQueryViaConstantScoreQuery() {\n InnerHitBuilder leafInnerHits = randomInnerHits();\n NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder(\"path\", new MatchAllQueryBuilder(), ScoreMode.None)\n- .innerHit(leafInnerHits);\n+ .innerHit(leafInnerHits, false);\n ConstantScoreQueryBuilder constantScoreQueryBuilder = new ConstantScoreQueryBuilder(nestedQueryBuilder);\n Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();\n constantScoreQueryBuilder.extractInnerHitBuilders(innerHitBuilders);\n@@ -167,10 +171,10 @@ public void testInlineLeafInnerHitsNestedQueryViaConstantScoreQuery() {\n public void testInlineLeafInnerHitsNestedQueryViaBoostingQuery() {\n InnerHitBuilder leafInnerHits1 = randomInnerHits();\n NestedQueryBuilder nestedQueryBuilder1 = new NestedQueryBuilder(\"path\", new MatchAllQueryBuilder(), ScoreMode.None)\n- .innerHit(leafInnerHits1);\n+ .innerHit(leafInnerHits1, false);\n InnerHitBuilder leafInnerHits2 = randomInnerHits();\n NestedQueryBuilder nestedQueryBuilder2 = new NestedQueryBuilder(\"path\", new MatchAllQueryBuilder(), ScoreMode.None)\n- .innerHit(leafInnerHits2);\n+ .innerHit(leafInnerHits2, false);\n BoostingQueryBuilder constantScoreQueryBuilder = new BoostingQueryBuilder(nestedQueryBuilder1, nestedQueryBuilder2);\n Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();\n constantScoreQueryBuilder.extractInnerHitBuilders(innerHitBuilders);\n@@ -181,13 +185,68 @@ public void testInlineLeafInnerHitsNestedQueryViaBoostingQuery() {\n public void testInlineLeafInnerHitsNestedQueryViaFunctionScoreQuery() {\n InnerHitBuilder leafInnerHits = randomInnerHits();\n NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder(\"path\", new MatchAllQueryBuilder(), ScoreMode.None)\n- .innerHit(leafInnerHits);\n+ .innerHit(leafInnerHits, false);\n FunctionScoreQueryBuilder functionScoreQueryBuilder = new FunctionScoreQueryBuilder(nestedQueryBuilder);\n Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();\n ((AbstractQueryBuilder<?>) functionScoreQueryBuilder).extractInnerHitBuilders(innerHitBuilders);\n assertThat(innerHitBuilders.get(leafInnerHits.getName()), notNullValue());\n }\n \n+ public void testBuild_ingoreUnmappedNestQuery() throws Exception {\n+ QueryShardContext queryShardContext = mock(QueryShardContext.class);\n+ when(queryShardContext.getObjectMapper(\"path\")).thenReturn(null);\n+ SearchContext searchContext = mock(SearchContext.class);\n+ when(searchContext.getQueryShardContext()).thenReturn(queryShardContext);\n+\n+ InnerHitBuilder leafInnerHits = randomInnerHits();\n+ NestedQueryBuilder query1 = new 
NestedQueryBuilder(\"path\", new MatchAllQueryBuilder(), ScoreMode.None);\n+ query1.innerHit(leafInnerHits, false);\n+ expectThrows(IllegalStateException.class, () -> query1.innerHit().build(searchContext, new InnerHitsContext()));\n+\n+ NestedQueryBuilder query2 = new NestedQueryBuilder(\"path\", new MatchAllQueryBuilder(), ScoreMode.None);\n+ query2.innerHit(leafInnerHits, true);\n+ InnerHitsContext innerHitsContext = new InnerHitsContext();\n+ query2.innerHit().build(searchContext, innerHitsContext);\n+ assertThat(innerHitsContext.getInnerHits().size(), equalTo(0));\n+ }\n+\n+ public void testBuild_ignoreUnmappedHasChildQuery() throws Exception {\n+ QueryShardContext queryShardContext = mock(QueryShardContext.class);\n+ when(queryShardContext.documentMapper(\"type\")).thenReturn(null);\n+ SearchContext searchContext = mock(SearchContext.class);\n+ when(searchContext.getQueryShardContext()).thenReturn(queryShardContext);\n+\n+ InnerHitBuilder leafInnerHits = randomInnerHits();\n+ HasChildQueryBuilder query1 = new HasChildQueryBuilder(\"type\", new MatchAllQueryBuilder(), ScoreMode.None)\n+ .innerHit(leafInnerHits, false);\n+ expectThrows(IllegalStateException.class, () -> query1.innerHit().build(searchContext, new InnerHitsContext()));\n+\n+ HasChildQueryBuilder query2 = new HasChildQueryBuilder(\"type\", new MatchAllQueryBuilder(), ScoreMode.None)\n+ .innerHit(leafInnerHits, true);\n+ InnerHitsContext innerHitsContext = new InnerHitsContext();\n+ query2.innerHit().build(searchContext, innerHitsContext);\n+ assertThat(innerHitsContext.getInnerHits().size(), equalTo(0));\n+ }\n+\n+ public void testBuild_ingoreUnmappedHasParentQuery() throws Exception {\n+ QueryShardContext queryShardContext = mock(QueryShardContext.class);\n+ when(queryShardContext.documentMapper(\"type\")).thenReturn(null);\n+ SearchContext searchContext = mock(SearchContext.class);\n+ when(searchContext.getQueryShardContext()).thenReturn(queryShardContext);\n+\n+ InnerHitBuilder leafInnerHits = randomInnerHits();\n+ HasParentQueryBuilder query1 = new HasParentQueryBuilder(\"type\", new MatchAllQueryBuilder(), false)\n+ .innerHit(leafInnerHits, false);\n+ expectThrows(IllegalStateException.class, () -> query1.innerHit().build(searchContext, new InnerHitsContext()));\n+\n+ HasParentQueryBuilder query2 = new HasParentQueryBuilder(\"type\", new MatchAllQueryBuilder(), false)\n+ .innerHit(leafInnerHits, true);\n+ InnerHitsContext innerHitsContext = new InnerHitsContext();\n+ query2.innerHit().build(searchContext, innerHitsContext);\n+ assertThat(innerHitsContext.getInnerHits().size(), equalTo(0));\n+ }\n+\n+\n public static InnerHitBuilder randomInnerHits() {\n return randomInnerHits(true, true);\n }\n@@ -236,9 +295,9 @@ public static InnerHitBuilder randomInnerHits(boolean recursive, boolean include\n if (includeQueryTypeOrPath) {\n QueryBuilder query = new MatchQueryBuilder(randomAsciiOfLengthBetween(1, 16), randomAsciiOfLengthBetween(1, 16));\n if (randomBoolean()) {\n- return new InnerHitBuilder(innerHits, randomAsciiOfLength(8), query);\n+ return new InnerHitBuilder(innerHits, randomAsciiOfLength(8), query, randomBoolean());\n } else {\n- return new InnerHitBuilder(innerHits, query, randomAsciiOfLength(8));\n+ return new InnerHitBuilder(innerHits, query, randomAsciiOfLength(8), randomBoolean());\n }\n } else {\n return innerHits;\n@@ -248,8 +307,8 @@ public static InnerHitBuilder randomInnerHits(boolean recursive, boolean include\n public void testCopyConstructor() throws Exception {\n InnerHitBuilder original = 
randomInnerHits();\n InnerHitBuilder copy = original.getNestedPath() != null ?\n- new InnerHitBuilder(original, original.getNestedPath(), original.getQuery()) :\n- new InnerHitBuilder(original, original.getQuery(), original.getParentChildType());\n+ new InnerHitBuilder(original, original.getNestedPath(), original.getQuery(), original.isIgnoreUnmapped()) :\n+ new InnerHitBuilder(original, original.getQuery(), original.getParentChildType(), original.isIgnoreUnmapped());\n assertThat(copy, equalTo(original));\n copy = mutate(copy);\n assertThat(copy, not(equalTo(original)));", "filename": "core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java", "status": "modified" }, { "diff": "@@ -73,13 +73,13 @@ protected NestedQueryBuilder doCreateTestQueryBuilder() {\n }\n NestedQueryBuilder nqb = new NestedQueryBuilder(\"nested1\", innerQueryBuilder,\n RandomPicks.randomFrom(random(), ScoreMode.values()));\n+ nqb.ignoreUnmapped(randomBoolean());\n if (randomBoolean()) {\n nqb.innerHit(new InnerHitBuilder()\n .setName(randomAsciiOfLengthBetween(1, 10))\n .setSize(randomIntBetween(0, 100))\n- .addSort(new FieldSortBuilder(INT_FIELD_NAME).order(SortOrder.ASC)));\n+ .addSort(new FieldSortBuilder(INT_FIELD_NAME).order(SortOrder.ASC)), nqb.ignoreUnmapped());\n }\n- nqb.ignoreUnmapped(randomBoolean());\n return nqb;\n }\n ", "filename": "core/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -94,7 +94,6 @@\n import static org.hamcrest.Matchers.containsString;\n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.greaterThanOrEqualTo;\n-import static org.hamcrest.Matchers.instanceOf;\n import static org.hamcrest.Matchers.is;\n import static org.hamcrest.Matchers.notNullValue;\n \n@@ -822,7 +821,8 @@ public void testHasChildInnerHitsHighlighting() throws Exception {\n SearchResponse searchResponse = client().prepareSearch(\"test\").setQuery(\n hasChildQuery(\"child\", matchQuery(\"c_field\", \"foo\"), ScoreMode.None)\n .innerHit(new InnerHitBuilder().setHighlightBuilder(\n- new HighlightBuilder().field(new Field(\"c_field\").highlightQuery(QueryBuilders.matchQuery(\"c_field\", \"bar\"))))))\n+ new HighlightBuilder().field(new Field(\"c_field\")\n+ .highlightQuery(QueryBuilders.matchQuery(\"c_field\", \"bar\")))), false))\n .get();\n assertNoFailures(searchResponse);\n assertThat(searchResponse.getHits().totalHits(), equalTo(1L));", "filename": "core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java", "status": "modified" }, { "diff": "@@ -123,7 +123,7 @@ public void testSimpleNested() throws Exception {\n \n SearchResponse response = client().prepareSearch(\"articles\")\n .setQuery(nestedQuery(\"comments\", matchQuery(\"comments.message\", \"fox\"), ScoreMode.Avg)\n- .innerHit(new InnerHitBuilder().setName(\"comment\"))\n+ .innerHit(new InnerHitBuilder().setName(\"comment\"), false)\n ).get();\n assertNoFailures(response);\n assertHitCount(response, 1);\n@@ -141,7 +141,7 @@ public void testSimpleNested() throws Exception {\n \n response = client().prepareSearch(\"articles\")\n .setQuery(nestedQuery(\"comments\", matchQuery(\"comments.message\", \"elephant\"), ScoreMode.Avg)\n- .innerHit(new InnerHitBuilder().setName(\"comment\"))\n+ .innerHit(new InnerHitBuilder().setName(\"comment\"), false)\n ).get();\n assertNoFailures(response);\n assertHitCount(response, 1);\n@@ -168,8 +168,8 @@ public void testSimpleNested() throws Exception {\n .addDocValueField(\"comments.message\")\n 
.addScriptField(\"script\",\n new Script(ScriptType.INLINE, MockScriptEngine.NAME, \"5\", Collections.emptyMap()))\n- .setSize(1)\n- )).get();\n+ .setSize(1),\n+ false)).get();\n assertNoFailures(response);\n innerHits = response.getHits().getAt(0).getInnerHits().get(\"comments\");\n assertThat(innerHits.getTotalHits(), equalTo(2L));\n@@ -207,9 +207,9 @@ public void testRandomNested() throws Exception {\n int size = randomIntBetween(0, numDocs);\n BoolQueryBuilder boolQuery = new BoolQueryBuilder();\n boolQuery.should(nestedQuery(\"field1\", matchAllQuery(), ScoreMode.Avg).innerHit(new InnerHitBuilder().setName(\"a\").setSize(size)\n- .addSort(new FieldSortBuilder(\"_doc\").order(SortOrder.DESC))));\n+ .addSort(new FieldSortBuilder(\"_doc\").order(SortOrder.DESC)), false));\n boolQuery.should(nestedQuery(\"field2\", matchAllQuery(), ScoreMode.Avg).innerHit(new InnerHitBuilder().setName(\"b\")\n- .addSort(new FieldSortBuilder(\"_doc\").order(SortOrder.DESC)).setSize(size)));\n+ .addSort(new FieldSortBuilder(\"_doc\").order(SortOrder.DESC)).setSize(size), false));\n SearchResponse searchResponse = client().prepareSearch(\"idx\")\n .setQuery(boolQuery)\n .setSize(numDocs)\n@@ -260,7 +260,8 @@ public void testSimpleParentChild() throws Exception {\n indexRandom(true, requests);\n \n SearchResponse response = client().prepareSearch(\"articles\")\n- .setQuery(hasChildQuery(\"comment\", matchQuery(\"message\", \"fox\"), ScoreMode.None).innerHit(new InnerHitBuilder()))\n+ .setQuery(hasChildQuery(\"comment\", matchQuery(\"message\", \"fox\"), ScoreMode.None)\n+ .innerHit(new InnerHitBuilder(), false))\n .get();\n assertNoFailures(response);\n assertHitCount(response, 1);\n@@ -277,7 +278,8 @@ public void testSimpleParentChild() throws Exception {\n assertThat(innerHits.getAt(1).type(), equalTo(\"comment\"));\n \n response = client().prepareSearch(\"articles\")\n- .setQuery(hasChildQuery(\"comment\", matchQuery(\"message\", \"elephant\"), ScoreMode.None).innerHit(new InnerHitBuilder()))\n+ .setQuery(hasChildQuery(\"comment\", matchQuery(\"message\", \"elephant\"), ScoreMode.None)\n+ .innerHit(new InnerHitBuilder(), false))\n .get();\n assertNoFailures(response);\n assertHitCount(response, 1);\n@@ -302,8 +304,8 @@ public void testSimpleParentChild() throws Exception {\n .setHighlightBuilder(new HighlightBuilder().field(\"message\"))\n .setExplain(true).setSize(1)\n .addScriptField(\"script\", new Script(ScriptType.INLINE, MockScriptEngine.NAME, \"5\",\n- Collections.emptyMap()))\n- )\n+ Collections.emptyMap())),\n+ false)\n ).get();\n assertNoFailures(response);\n innerHits = response.getHits().getAt(0).getInnerHits().get(\"comment\");\n@@ -349,9 +351,11 @@ public void testRandomParentChild() throws Exception {\n int size = randomIntBetween(0, numDocs);\n BoolQueryBuilder boolQuery = new BoolQueryBuilder();\n boolQuery.should(constantScoreQuery(hasChildQuery(\"child1\", matchAllQuery(), ScoreMode.None)\n- .innerHit(new InnerHitBuilder().setName(\"a\").addSort(new FieldSortBuilder(\"_uid\").order(SortOrder.ASC)).setSize(size))));\n+ .innerHit(new InnerHitBuilder().setName(\"a\")\n+ .addSort(new FieldSortBuilder(\"_uid\").order(SortOrder.ASC)).setSize(size), false)));\n boolQuery.should(constantScoreQuery(hasChildQuery(\"child2\", matchAllQuery(), ScoreMode.None)\n- .innerHit(new InnerHitBuilder().setName(\"b\").addSort(new FieldSortBuilder(\"_uid\").order(SortOrder.ASC)).setSize(size))));\n+ .innerHit(new InnerHitBuilder().setName(\"b\")\n+ .addSort(new 
FieldSortBuilder(\"_uid\").order(SortOrder.ASC)).setSize(size), false)));\n SearchResponse searchResponse = client().prepareSearch(\"idx\")\n .setSize(numDocs)\n .setTypes(\"parent\")\n@@ -417,7 +421,7 @@ public void testInnerHitsOnHasParent() throws Exception {\n .setQuery(\n boolQuery()\n .must(matchQuery(\"body\", \"fail2ban\"))\n- .must(hasParentQuery(\"question\", matchAllQuery(), false).innerHit(new InnerHitBuilder()))\n+ .must(hasParentQuery(\"question\", matchAllQuery(), false).innerHit(new InnerHitBuilder(), false))\n ).get();\n assertNoFailures(response);\n assertHitCount(response, 2);\n@@ -455,8 +459,8 @@ public void testParentChildMultipleLayers() throws Exception {\n \n SearchResponse response = client().prepareSearch(\"articles\")\n .setQuery(hasChildQuery(\"comment\",\n- hasChildQuery(\"remark\", matchQuery(\"message\", \"good\"), ScoreMode.None).innerHit(new InnerHitBuilder()),\n- ScoreMode.None).innerHit(new InnerHitBuilder()))\n+ hasChildQuery(\"remark\", matchQuery(\"message\", \"good\"), ScoreMode.None).innerHit(new InnerHitBuilder(), false),\n+ ScoreMode.None).innerHit(new InnerHitBuilder(), false))\n .get();\n \n assertNoFailures(response);\n@@ -476,8 +480,8 @@ public void testParentChildMultipleLayers() throws Exception {\n \n response = client().prepareSearch(\"articles\")\n .setQuery(hasChildQuery(\"comment\",\n- hasChildQuery(\"remark\", matchQuery(\"message\", \"bad\"), ScoreMode.None).innerHit(new InnerHitBuilder()),\n- ScoreMode.None).innerHit(new InnerHitBuilder()))\n+ hasChildQuery(\"remark\", matchQuery(\"message\", \"bad\"), ScoreMode.None).innerHit(new InnerHitBuilder(), false),\n+ ScoreMode.None).innerHit(new InnerHitBuilder(), false))\n .get();\n \n assertNoFailures(response);\n@@ -543,8 +547,8 @@ public void testNestedMultipleLayers() throws Exception {\n .setQuery(\n nestedQuery(\"comments\",\n nestedQuery(\"comments.remarks\", matchQuery(\"comments.remarks.message\", \"good\"), ScoreMode.Avg)\n- .innerHit(new InnerHitBuilder().setName(\"remark\")),\n- ScoreMode.Avg).innerHit(new InnerHitBuilder())\n+ .innerHit(new InnerHitBuilder().setName(\"remark\"), false),\n+ ScoreMode.Avg).innerHit(new InnerHitBuilder(), false)\n ).get();\n assertNoFailures(response);\n assertHitCount(response, 1);\n@@ -568,7 +572,7 @@ public void testNestedMultipleLayers() throws Exception {\n // Directly refer to the second level:\n response = client().prepareSearch(\"articles\")\n .setQuery(nestedQuery(\"comments.remarks\", matchQuery(\"comments.remarks.message\", \"bad\"), ScoreMode.Avg)\n- .innerHit(new InnerHitBuilder())).get();\n+ .innerHit(new InnerHitBuilder(), false)).get();\n assertNoFailures(response);\n assertHitCount(response, 1);\n assertSearchHit(response, 1, hasId(\"2\"));\n@@ -586,8 +590,8 @@ public void testNestedMultipleLayers() throws Exception {\n .setQuery(\n nestedQuery(\"comments\",\n nestedQuery(\"comments.remarks\", matchQuery(\"comments.remarks.message\", \"bad\"), ScoreMode.Avg)\n- .innerHit(new InnerHitBuilder().setName(\"remark\")),\n- ScoreMode.Avg).innerHit(new InnerHitBuilder())\n+ .innerHit(new InnerHitBuilder().setName(\"remark\"), false),\n+ ScoreMode.Avg).innerHit(new InnerHitBuilder(), false)\n ).get();\n assertNoFailures(response);\n assertHitCount(response, 1);\n@@ -621,7 +625,8 @@ public void testNestedDefinedAsObject() throws Exception {\n indexRandom(true, requests);\n \n SearchResponse response = client().prepareSearch(\"articles\")\n- .setQuery(nestedQuery(\"comments\", matchQuery(\"comments.message\", \"fox\"), 
ScoreMode.Avg).innerHit(new InnerHitBuilder()))\n+ .setQuery(nestedQuery(\"comments\", matchQuery(\"comments.message\", \"fox\"), ScoreMode.Avg)\n+ .innerHit(new InnerHitBuilder(), false))\n .get();\n assertNoFailures(response);\n assertHitCount(response, 1);\n@@ -663,7 +668,7 @@ public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception {\n \n SearchResponse response = client().prepareSearch(\"articles\")\n .setQuery(nestedQuery(\"comments.messages\", matchQuery(\"comments.messages.message\", \"fox\"), ScoreMode.Avg)\n- .innerHit(new InnerHitBuilder())).get();\n+ .innerHit(new InnerHitBuilder(), false)).get();\n assertNoFailures(response);\n assertHitCount(response, 1);\n SearchHit hit = response.getHits().getAt(0);\n@@ -677,7 +682,7 @@ public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception {\n \n response = client().prepareSearch(\"articles\")\n .setQuery(nestedQuery(\"comments.messages\", matchQuery(\"comments.messages.message\", \"bear\"), ScoreMode.Avg)\n- .innerHit(new InnerHitBuilder())).get();\n+ .innerHit(new InnerHitBuilder(), false)).get();\n assertNoFailures(response);\n assertHitCount(response, 1);\n hit = response.getHits().getAt(0);\n@@ -698,7 +703,7 @@ public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception {\n indexRandom(true, requests);\n response = client().prepareSearch(\"articles\")\n .setQuery(nestedQuery(\"comments.messages\", matchQuery(\"comments.messages.message\", \"fox\"), ScoreMode.Avg)\n- .innerHit(new InnerHitBuilder())).get();\n+ .innerHit(new InnerHitBuilder(), false)).get();\n assertNoFailures(response);\n assertHitCount(response, 1);\n hit = response.getHits().getAt(0);;\n@@ -739,15 +744,16 @@ public void testRoyals() throws Exception {\n .setTypes(\"duke\")\n .setQuery(boolQuery()\n .filter(hasParentQuery(\"prince\",\n- hasParentQuery(\"king\", matchAllQuery(), false).innerHit(new InnerHitBuilder().setName(\"kings\")),\n- false).innerHit(new InnerHitBuilder().setName(\"princes\"))\n+ hasParentQuery(\"king\", matchAllQuery(), false).innerHit(new InnerHitBuilder().setName(\"kings\"), false),\n+ false).innerHit(new InnerHitBuilder().setName(\"princes\"), false)\n )\n .filter(hasChildQuery(\"earl\",\n- hasChildQuery(\"baron\", matchAllQuery(), ScoreMode.None).innerHit(new InnerHitBuilder().setName(\"barons\")),\n+ hasChildQuery(\"baron\", matchAllQuery(), ScoreMode.None)\n+ .innerHit(new InnerHitBuilder().setName(\"barons\"), false),\n ScoreMode.None).innerHit(new InnerHitBuilder()\n .addSort(SortBuilders.fieldSort(\"_uid\").order(SortOrder.ASC))\n .setName(\"earls\")\n- .setSize(4))\n+ .setSize(4), false)\n )\n )\n .get();\n@@ -860,7 +866,7 @@ public void testMatchesQueriesNestedInnerHits() throws Exception {\n .should(termQuery(\"nested1.n_field1\", \"n_value1_3\").queryName(\"test2\"))\n .should(termQuery(\"nested1.n_field2\", \"n_value2_2\").queryName(\"test3\"));\n query = nestedQuery(\"nested1\", query, ScoreMode.Avg).innerHit(\n- new InnerHitBuilder().addSort(new FieldSortBuilder(\"nested1.n_field1\").order(SortOrder.ASC)));\n+ new InnerHitBuilder().addSort(new FieldSortBuilder(\"nested1.n_field1\").order(SortOrder.ASC)), false);\n SearchResponse searchResponse = client().prepareSearch(\"test\")\n .setQuery(query)\n .setSize(numDocs)\n@@ -902,7 +908,7 @@ public void testMatchesQueriesParentChildInnerHits() throws Exception {\n \n SearchResponse response = client().prepareSearch(\"index\")\n .setQuery(hasChildQuery(\"child\", matchQuery(\"field\", \"value1\").queryName(\"_name1\"), 
ScoreMode.None)\n- .innerHit(new InnerHitBuilder()))\n+ .innerHit(new InnerHitBuilder(), false))\n .addSort(\"_uid\", SortOrder.ASC)\n .get();\n assertHitCount(response, 2);\n@@ -917,7 +923,7 @@ public void testMatchesQueriesParentChildInnerHits() throws Exception {\n assertThat(response.getHits().getAt(1).getInnerHits().get(\"child\").getAt(0).getMatchedQueries()[0], equalTo(\"_name1\"));\n \n QueryBuilder query = hasChildQuery(\"child\", matchQuery(\"field\", \"value2\").queryName(\"_name2\"), ScoreMode.None)\n- .innerHit(new InnerHitBuilder());\n+ .innerHit(new InnerHitBuilder(), false);\n response = client().prepareSearch(\"index\")\n .setQuery(query)\n .addSort(\"_uid\", SortOrder.ASC)\n@@ -937,7 +943,7 @@ public void testDontExplode() throws Exception {\n indexRandom(true, requests);\n \n QueryBuilder query = hasChildQuery(\"child\", matchQuery(\"field\", \"value1\"), ScoreMode.None)\n- .innerHit(new InnerHitBuilder().setSize(ArrayUtil.MAX_ARRAY_LENGTH - 1));\n+ .innerHit(new InnerHitBuilder().setSize(ArrayUtil.MAX_ARRAY_LENGTH - 1), false);\n SearchResponse response = client().prepareSearch(\"index1\")\n .setQuery(query)\n .addSort(\"_uid\", SortOrder.ASC)\n@@ -957,7 +963,7 @@ public void testDontExplode() throws Exception {\n .get();\n \n query = nestedQuery(\"nested\", matchQuery(\"nested.field\", \"value1\"), ScoreMode.Avg)\n- .innerHit(new InnerHitBuilder().setSize(ArrayUtil.MAX_ARRAY_LENGTH - 1));\n+ .innerHit(new InnerHitBuilder().setSize(ArrayUtil.MAX_ARRAY_LENGTH - 1), false);\n response = client().prepareSearch(\"index2\")\n .setQuery(query)\n .addSort(\"_uid\", SortOrder.ASC)\n@@ -983,7 +989,7 @@ public void testNestedSourceFiltering() throws Exception {\n SearchResponse response = client().prepareSearch()\n .setQuery(nestedQuery(\"comments\", matchQuery(\"comments.message\", \"fox\"), ScoreMode.None)\n .innerHit(new InnerHitBuilder().setFetchSourceContext(new FetchSourceContext(true,\n- new String[]{\"comments.message\"}, null))))\n+ new String[]{\"comments.message\"}, null)), false))\n .get();\n assertNoFailures(response);\n assertHitCount(response, 1);\n@@ -1004,12 +1010,48 @@ public void testNestedInnerHitWrappedInParentChildInnerhit() throws Exception {\n SearchResponse response = client().prepareSearch(\"test\")\n .setQuery(boolQuery().must(matchQuery(\"key\", \"value\"))\n .should(hasChildQuery(\"child_type\", nestedQuery(\"nested_type\", matchAllQuery(), ScoreMode.None)\n- .innerHit(new InnerHitBuilder()), ScoreMode.None).innerHit(new InnerHitBuilder())))\n+ .innerHit(new InnerHitBuilder(), false), ScoreMode.None).innerHit(new InnerHitBuilder(), false)))\n .get();\n assertHitCount(response, 1);\n SearchHit hit = response.getHits().getAt(0);\n assertThat(hit.getInnerHits().get(\"child_type\").getAt(0).field(\"_parent\").getValue(), equalTo(\"1\"));\n assertThat(hit.getInnerHits().get(\"child_type\").getAt(0).getInnerHits().get(\"nested_type\").getAt(0).field(\"_parent\"), nullValue());\n }\n \n+ public void testInnerHitsWithIgnoreUnmapped() throws Exception {\n+ assertAcked(prepareCreate(\"index1\")\n+ .addMapping(\"parent_type\", \"nested_type\", \"type=nested\")\n+ .addMapping(\"child_type\", \"_parent\", \"type=parent_type\")\n+ );\n+ createIndex(\"index2\");\n+ client().prepareIndex(\"index1\", \"parent_type\", \"1\").setSource(\"nested_type\", Collections.singletonMap(\"key\", \"value\")).get();\n+ client().prepareIndex(\"index1\", \"child_type\", \"2\").setParent(\"1\").setSource(\"{}\").get();\n+ client().prepareIndex(\"index2\", \"type\", 
\"3\").setSource(\"key\", \"value\").get();\n+ refresh();\n+\n+ SearchResponse response = client().prepareSearch(\"index1\", \"index2\")\n+ .setQuery(boolQuery()\n+ .should(nestedQuery(\"nested_type\", matchAllQuery(), ScoreMode.None).ignoreUnmapped(true)\n+ .innerHit(new InnerHitBuilder(), true))\n+ .should(termQuery(\"key\", \"value\"))\n+ )\n+ .addSort(\"_uid\", SortOrder.ASC)\n+ .get();\n+ assertNoFailures(response);\n+ assertHitCount(response, 2);\n+ assertThat(response.getHits().getAt(0).getId(), equalTo(\"1\"));\n+\n+ response = client().prepareSearch(\"index1\", \"index2\")\n+ .setQuery(boolQuery()\n+ .should(hasChildQuery(\"child_type\", matchAllQuery(), ScoreMode.None).ignoreUnmapped(true)\n+ .innerHit(new InnerHitBuilder(), true))\n+ .should(termQuery(\"key\", \"value\"))\n+ )\n+ .addSort(\"_uid\", SortOrder.ASC)\n+ .get();\n+ assertNoFailures(response);\n+ assertHitCount(response, 2);\n+ assertThat(response.getHits().getAt(0).getId(), equalTo(\"1\"));\n+ }\n+\n }", "filename": "core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java", "status": "modified" } ] }
{ "body": "Dears,\r\n\r\nWe suppose, we found a performance problem in the elasticsearch code.\r\n\r\n**Elasticsearch version**: 2.4.1 but also master branch\r\n**Plugins installed**: [] Shield, but it has nothing to do with the problem\r\n**JVM version**: 1.8.112\r\n**OS version**: Windows, Linux\r\n**Description of the problem including expected versus actual behavior**:\r\n\r\nWe had spend some time on testing Elasticsearch performance. \r\nWe think, we found a problem in the JAVA API (client) for the Elasticsearch. \r\nWe performed tests making regular logging (with time stamps) as well as running software with JProfiler.\r\n\r\nThe performace problem is located in the elasticsearch/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java in the method: `public void writeString(String str)`\r\n\r\nAnd we suppose the problem is that writeString uses in the loop `writeByte `method (elasticsearch/core/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java) \r\nwhich uses `ensureCapacity `method (elasticsearch/core/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java ), \r\nwhich calls `grow `method (elasticsearch/core/src/main/java/org/elasticsearch/common/util/BigArrays.java).\r\n\r\nThe regular usage of the writeByte has no problem with ensure capacity and grow. But ...\r\n\r\nJProfiler shows us, when it comes to call `writeString` the method call chain is higly inefficient,\r\nbecause writeString **for every** character in the string calls all mentioned methods to write single byte, which needs to ensure capacity and grows Arrays - byte by byte. \r\nThis is made for every document, which send to Elasticsearch, which is normally not a short string. \r\nIn this operation there are many objects copied and there are executed many memory reallocations in the loop for single bytes.\r\n\r\nWe also made code review in the master brach, and it confirms what JProfiler states. The `writeString` call tree is exactly using those heavy methods for each and every byte.\r\n\r\nCall-graph is attached.\r\n![call_graph](https://cloud.githubusercontent.com/assets/8561546/20434245/458e9c84-ada7-11e6-9746-0841393de53e.png)\r\n\r\n\r\n**Describe the feature**:\r\n\r\nThe proposal would be to modify writeString method **not** to use writeByte foreach and every byte,\r\nbut to do the following:\r\n\r\n1. Check the needed size of the string/array/buffer (once)\r\n2. Allocate memory for the needed size (once)\r\n3. Perform \"unsafe\" copy of the string (once) - it is no more unsafe, as memory is already provided.\r\n4. Execute special modifications on characters **in the already copied string**, which are done in writeString method - this might be done in a kind of a loop.\r\n\r\nWhat do you think of it?\r\n\r\nWe are testing now a kind of simple check if this proposal is helping us, \r\nbut we do not know very good your codebase, so we cannot provide complete bug fix, \r\nwhich will be widely tested.\r\n\r\nThanks in advace for any input to this problem.\r\n\r\nBest regards,\r\nSeweryn.\r\n", "comments": [ { "body": "I agree we should not call `ensureCapacity` on every byte of a utf8 string.\n", "created_at": "2016-11-18T17:27:39Z" } ], "number": 21660, "title": "Performance problems with StreamOutput.writeString" }
{ "body": "Today we call `writeByte` up to 3x per character in each string written via\r\n`StreamOutput#writeString` this can have quite some overhead when strings\r\nare long or many strings are written. This change adds a local buffer to\r\nconvert chars to bytes into the local buffer. Converted bytes are then\r\nwritten via `writeBytes` instead reducing the overhead of this operation.\r\n\r\nCloses #21660", "number": 21680, "review_comments": [ { "body": "I think you can replace the three above lines with just `convertStringBuffer = ArrayUtil.grow(convertStringBuffer, bufferSize);`\n", "created_at": "2016-11-19T16:32:05Z" }, { "body": "I think it should be a strict greater than?\n", "created_at": "2016-11-19T18:47:32Z" }, { "body": "maybe also test an explicit big string that only contains chars that are stored on 3 bytes?\n", "created_at": "2016-11-19T18:48:37Z" }, { "body": "so I had this before but there is no need to copy the array since we are trashing it that's why I used oversize?", "created_at": "2016-11-21T07:55:15Z" }, { "body": "done", "created_at": "2016-11-21T08:23:18Z" } ], "title": "Use a buffer to do character to byte conversion in StreamOutput#writeString" }
{ "commits": [ { "message": "Use a buffer to do character to byte conversion in StreamOutput#writeString\n\nToday we call `writeByte` up to 3x per character in each string written via\n`StreamOutput#writeString` this can have quite some overhead when strings\nare long or many strings are written. This change adds a local buffer to\nconvert chars to bytes into the local buffer. Converted bytes are then\nwritten via `writeBytes` instead reducing the overhead of this opertion.\n\nCloses #21660" }, { "message": "apply feedback" }, { "message": "add comment" }, { "message": "simplify loop in StreamInput#readString" }, { "message": "make readString consistent with writeString and add comments" } ], "files": [ { "diff": "@@ -24,8 +24,10 @@\n import org.apache.lucene.index.IndexFormatTooOldException;\n import org.apache.lucene.store.AlreadyClosedException;\n import org.apache.lucene.store.LockObtainFailedException;\n+import org.apache.lucene.util.ArrayUtil;\n import org.apache.lucene.util.BitUtil;\n import org.apache.lucene.util.BytesRef;\n+import org.apache.lucene.util.CharsRef;\n import org.apache.lucene.util.CharsRefBuilder;\n import org.elasticsearch.ElasticsearchException;\n import org.elasticsearch.Version;\n@@ -323,15 +325,22 @@ public Integer readOptionalVInt() throws IOException {\n return null;\n }\n \n- private final CharsRefBuilder spare = new CharsRefBuilder();\n+ // we don't use a CharsRefBuilder since we exactly know the size of the character array up front\n+ // this prevents calling grow for every character since we don't need this\n+ private final CharsRef spare = new CharsRef();\n \n public String readString() throws IOException {\n+ // TODO it would be nice to not call readByte() for every character but we don't know how much to read up-front\n+ // we can make the loop much more complicated but that won't buy us much compared to the bounds checks in readByte()\n final int charCount = readVInt();\n- spare.clear();\n- spare.grow(charCount);\n- int c;\n- while (spare.length() < charCount) {\n- c = readByte() & 0xff;\n+ if (spare.chars.length < charCount) {\n+ // we don't use ArrayUtils.grow since there is no need to copy the array\n+ spare.chars = new char[ArrayUtil.oversize(charCount, Character.BYTES)];\n+ }\n+ spare.length = charCount;\n+ final char[] buffer = spare.chars;\n+ for (int i = 0; i < charCount; i++) {\n+ final int c = readByte() & 0xff;\n switch (c >> 4) {\n case 0:\n case 1:\n@@ -341,15 +350,17 @@ public String readString() throws IOException {\n case 5:\n case 6:\n case 7:\n- spare.append((char) c);\n+ buffer[i] = (char) c;\n break;\n case 12:\n case 13:\n- spare.append((char) ((c & 0x1F) << 6 | readByte() & 0x3F));\n+ buffer[i] = ((char) ((c & 0x1F) << 6 | readByte() & 0x3F));\n break;\n case 14:\n- spare.append((char) ((c & 0x0F) << 12 | (readByte() & 0x3F) << 6 | (readByte() & 0x3F) << 0));\n+ buffer[i] = ((char) ((c & 0x0F) << 12 | (readByte() & 0x3F) << 6 | (readByte() & 0x3F) << 0));\n break;\n+ default:\n+ new AssertionError(\"unexpected character: \" + c + \" hex: \" + Integer.toHexString(c));\n }\n }\n return spare.toString();", "filename": "core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java", "status": "modified" }, { "diff": "@@ -24,6 +24,7 @@\n import org.apache.lucene.index.IndexFormatTooOldException;\n import org.apache.lucene.store.AlreadyClosedException;\n import org.apache.lucene.store.LockObtainFailedException;\n+import org.apache.lucene.util.ArrayUtil;\n import org.apache.lucene.util.BitUtil;\n import 
org.apache.lucene.util.BytesRef;\n import org.apache.lucene.util.BytesRefBuilder;\n@@ -298,23 +299,41 @@ public void writeText(Text text) throws IOException {\n }\n }\n \n+ // we use a small buffer to convert strings to bytes since we want to prevent calling writeByte\n+ // for every byte in the string (see #21660 for details).\n+ // This buffer will never be the oversized limit of 1024 bytes and will not be shared across streams\n+ private byte[] convertStringBuffer = BytesRef.EMPTY_BYTES; // TODO should we reduce it to 0 bytes once the stream is closed?\n+\n public void writeString(String str) throws IOException {\n- int charCount = str.length();\n+ final int charCount = str.length();\n+ final int bufferSize = Math.min(3 * charCount, 1024); // at most 3 bytes per character is needed here\n+ if (convertStringBuffer.length < bufferSize) { // we don't use ArrayUtils.grow since copying the bytes is unnecessary\n+ convertStringBuffer = new byte[ArrayUtil.oversize(bufferSize, Byte.BYTES)];\n+ }\n+ byte[] buffer = convertStringBuffer;\n+ int offset = 0;\n writeVInt(charCount);\n- int c;\n for (int i = 0; i < charCount; i++) {\n- c = str.charAt(i);\n+ final int c = str.charAt(i);\n if (c <= 0x007F) {\n- writeByte((byte) c);\n+ buffer[offset++] = ((byte) c);\n } else if (c > 0x07FF) {\n- writeByte((byte) (0xE0 | c >> 12 & 0x0F));\n- writeByte((byte) (0x80 | c >> 6 & 0x3F));\n- writeByte((byte) (0x80 | c >> 0 & 0x3F));\n+ buffer[offset++] = ((byte) (0xE0 | c >> 12 & 0x0F));\n+ buffer[offset++] = ((byte) (0x80 | c >> 6 & 0x3F));\n+ buffer[offset++] = ((byte) (0x80 | c >> 0 & 0x3F));\n } else {\n- writeByte((byte) (0xC0 | c >> 6 & 0x1F));\n- writeByte((byte) (0x80 | c >> 0 & 0x3F));\n+ buffer[offset++] = ((byte) (0xC0 | c >> 6 & 0x1F));\n+ buffer[offset++] = ((byte) (0x80 | c >> 0 & 0x3F));\n+ }\n+ // make sure any possible char can fit into the buffer in any possible iteration\n+ // we need at most 3 bytes so we flush the buffer once we have less than 3 bytes\n+ // left before we start another iteration\n+ if (offset > buffer.length-3) {\n+ writeBytes(buffer, offset);\n+ offset = 0;\n }\n }\n+ writeBytes(buffer, offset);\n }\n \n public void writeFloat(float v) throws IOException {", "filename": "core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java", "status": "modified" }, { "diff": "@@ -19,7 +19,9 @@\n \n package org.elasticsearch.common.io.stream;\n \n+import org.apache.lucene.util.BytesRef;\n import org.apache.lucene.util.Constants;\n+import org.apache.lucene.util.UnicodeUtil;\n import org.elasticsearch.common.bytes.BytesArray;\n import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.geo.GeoPoint;\n@@ -657,4 +659,41 @@ private static <K, V> Map<K, V> randomMap(Map<K, V> map, int size, Supplier<K> k\n IntStream.range(0, size).forEach(i -> map.put(keyGenerator.get(), valueGenerator.get()));\n return map;\n }\n+\n+ public void testWriteRandomStrings() throws IOException {\n+ final int iters = scaledRandomIntBetween(5, 20);\n+ for (int iter = 0; iter < iters; iter++) {\n+ List<String> strings = new ArrayList<>();\n+ int numStrings = randomIntBetween(100, 1000);\n+ BytesStreamOutput output = new BytesStreamOutput(0);\n+ for (int i = 0; i < numStrings; i++) {\n+ String s = randomRealisticUnicodeOfLengthBetween(0, 2048);\n+ strings.add(s);\n+ output.writeString(s);\n+ }\n+\n+ try (StreamInput streamInput = output.bytes().streamInput()) {\n+ for (int i = 0; i < numStrings; i++) {\n+ String s = streamInput.readString();\n+ assertEquals(strings.get(i), 
s);\n+ }\n+ }\n+ }\n+ }\n+\n+ /*\n+ * tests the extreme case where characters use more than 2 bytes\n+ */\n+ public void testWriteLargeSurrogateOnlyString() throws IOException {\n+ String deseretLetter = \"\\uD801\\uDC00\";\n+ assertEquals(2, deseretLetter.length());\n+ String largeString = IntStream.range(0, 2048).mapToObj(s -> deseretLetter).collect(Collectors.joining(\"\")).trim();\n+ assertEquals(\"expands to 4 bytes\", 4, new BytesRef(deseretLetter).length);\n+ try (BytesStreamOutput output = new BytesStreamOutput(0)) {\n+ output.writeString(largeString);\n+ try (StreamInput streamInput = output.bytes().streamInput()) {\n+ assertEquals(largeString, streamInput.readString());\n+ }\n+ }\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java", "status": "modified" } ] }
{ "body": "We currently have concurrency issue between the static methods on the Store class and store changes that are done via a valid open store. An example of this is the async shard fetch which can reach out to a node while a local shard copy is shutting down (the fetch does check if we have an open shard and tries to use that first, but if the shard is shutting down, it will not be available from IndexService).\n\nSpecifically, async shard fetching tries to read metadata from store, concurrently the shard that shuts down commits to lucene, changing the segments_N file. this causes a file not find exception on the shard fetching side. That one in turns makes the master think the shard is unusable. In tests this can cause the shard assignment to be delayed (up to 1m) which fails tests. See https://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+master+java9-periodic/570 for details.\n\nThis is one of the things #18938 caused to bubble up.\n", "comments": [ { "body": "left on nit - LGTM otherwise\n", "created_at": "2016-07-18T08:27:01Z" }, { "body": "LGTM\n", "created_at": "2016-07-18T08:30:14Z" } ], "number": 19416, "title": "Make static Store access shard lock aware" }
{ "body": "PR #19416 added a safety mechanism to shard state fetching to only access the store when the shard lock can be acquired. This can lead to the following situation however where a shard has not fully shut down yet while the shard fetching is going on, resulting in a `ShardLockObtainFailedException`. `PrimaryShardAllocator` that decides where to allocate primary shards sees this exception and treats the shard as unusable. If this is the only shard copy in the cluster, the cluster stays red and a new shard fetching cycle will not be triggered as shard state fetching treats exceptions while opening the store as permanent failures.\r\n\r\nThis PR makes it so that `PrimaryShardAllocator` treats the locked shard as a possible allocation target (although with the least priority).", "number": 21656, "review_comments": [ { "body": "question - why not let `Store.tryOpenIndex` throw `ShardLockObtainFailedException` and catch it here directly, logging it and not storing it as a \"store\" exception? The first part will make things simpler imo. I'm fine with not going with the second part and doing it like you propose but wanted to better understand your reasoning.\n", "created_at": "2016-11-18T18:43:48Z" }, { "body": "I can directly throw `ShardLockObtainFailedException`. I wasn't sure why you wrapped that exception in the first place so I left as is.\n\nTo your second point, I left the decision on how to treat the `ShardLockObtainFailedException` to `PrimaryShardAllocator` as it has more context available to make the final decision where to allocate the shard. For example, it prioritizes another valid shard copy that has not thrown the exception. Also it allows the shard store action `/_shard_stores` to properly expose the exception as it reuses the same endpoint as the primary shard allocator.\n", "created_at": "2016-11-18T19:00:25Z" }, { "body": "> I can directly throw ShardLockObtainFailedException. I wasn't sure why you wrapped that exception in the first place so I left as is.\n\nI _think_ that I just didn't want to extend the scope of the change and I didn't have a reason to not just throw an IOException. I think the unwrapping here merits this?\n\n> For example, it prioritizes another valid shard copy that has not thrown the exception. Also it allows the shard store action /_shard_stores to properly expose the exception as it reuses the same endpoint as the primary shard allocator.\n\nFair enough. Thanks.\n", "created_at": "2016-11-18T19:11:28Z" }, { "body": "can we assert that the store exception is what we expect it to be?", "created_at": "2016-11-22T13:10:08Z" }, { "body": "same request for assertion.", "created_at": "2016-11-22T13:10:41Z" }, { "body": "did you run into this being a problem? how can we open an index that was created before 5.0.0 and never had insync replicas but does have allocationId? the only thing I can think of is a node network issue during shard initialization. I'm wondering if we need to optimize for this and no keep this code simple (i.e., demote shards with a lock exception)", "created_at": "2016-11-22T13:21:37Z" }, { "body": "can we add tests for cases with more shards? for example the case where we \"prefer\" other shard copies with this exception?", "created_at": "2016-11-22T13:25:05Z" }, { "body": "Sure, I've added more tests", "created_at": "2016-11-22T16:07:25Z" }, { "body": "We special-case this a few lines up as well (but it's not easy to do code reuse across those lines). For symmetry reasons I have kept it as is. 
The code is documented as well.", "created_at": "2016-11-22T16:08:42Z" }, { "body": "yay!", "created_at": "2016-11-22T17:06:31Z" }, { "body": "fancy pants. ", "created_at": "2016-11-22T17:12:40Z" }, { "body": "fair enough", "created_at": "2016-11-22T17:15:31Z" } ], "title": "Allow master to assign primary shard to node that has shard store locked during shard state fetching" }
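A stand-alone illustration of the allocation preference this PR introduces and that the PrimaryShardAllocator diff below implements with its comparator chain: prefer copies with an in-sync allocation id, then copies whose store opened cleanly, and only then copies that were previously primary. `ShardCopy` is a hypothetical stand-in for `NodeGatewayStartedShards`, not an Elasticsearch class, and the record syntax assumes a modern JDK.

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Set;

class AllocationPreferenceDemo {
    // Hypothetical stand-in for NodeGatewayStartedShards.
    record ShardCopy(String node, String allocationId, boolean primary, boolean storeOpenedCleanly) {}

    public static void main(String[] args) {
        Set<String> inSyncIds = Set.of("allocId1");
        List<ShardCopy> copies = new ArrayList<>(List.of(
            new ShardCopy("node3", "staleId", true, true),    // not in sync
            new ShardCopy("node1", "allocId1", false, false), // in sync, but store is locked
            new ShardCopy("node2", "allocId1", false, true)   // in sync and opens cleanly: best
        ));

        Comparator<ShardCopy> inSyncFirst =
            Comparator.comparing((ShardCopy c) -> inSyncIds.contains(c.allocationId())).reversed();
        Comparator<ShardCopy> cleanStoreFirst =
            Comparator.comparing(ShardCopy::storeOpenedCleanly).reversed();
        Comparator<ShardCopy> primaryFirst =
            Comparator.comparing(ShardCopy::primary).reversed();

        copies.sort(inSyncFirst.thenComparing(cleanStoreFirst).thenComparing(primaryFirst));
        System.out.println(copies.get(0).node()); // prints node2
    }
}
```

The locked copy is still in the candidate list, just demoted, which is exactly what lets the master assign the primary to it when it is the only copy available.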
{ "commits": [ { "message": "Allow master to assign primary shard to node that has shard store locked during shard state fetching" }, { "message": "review comments" } ], "files": [ { "diff": "@@ -38,6 +38,7 @@\n import org.elasticsearch.common.settings.Setting;\n import org.elasticsearch.common.settings.Setting.Property;\n import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.env.ShardLockObtainFailedException;\n import org.elasticsearch.gateway.AsyncShardFetch.FetchResult;\n import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards;\n import org.elasticsearch.index.shard.ShardStateMetaData;\n@@ -256,6 +257,11 @@ private static Map<String, Decision> buildNodeDecisions(NodesToAllocate nodesToA\n return nodeDecisions;\n }\n \n+ private static final Comparator<NodeGatewayStartedShards> NO_STORE_EXCEPTION_FIRST_COMPARATOR =\n+ Comparator.comparing((NodeGatewayStartedShards state) -> state.storeException() == null).reversed();\n+ private static final Comparator<NodeGatewayStartedShards> PRIMARY_FIRST_COMPARATOR =\n+ Comparator.comparing(NodeGatewayStartedShards::primary).reversed();\n+\n /**\n * Builds a list of nodes. If matchAnyShard is set to false, only nodes that have an allocation id matching\n * inSyncAllocationIds are added to the list. Otherwise, any node that has a shard is added to the list, but\n@@ -265,8 +271,7 @@ protected static NodeShardsResult buildAllocationIdBasedNodeShardsResult(ShardRo\n Set<String> ignoreNodes, Set<String> inSyncAllocationIds,\n FetchResult<NodeGatewayStartedShards> shardState,\n Logger logger) {\n- LinkedList<NodeGatewayStartedShards> matchingNodeShardStates = new LinkedList<>();\n- LinkedList<NodeGatewayStartedShards> nonMatchingNodeShardStates = new LinkedList<>();\n+ List<NodeGatewayStartedShards> nodeShardStates = new ArrayList<>();\n int numberOfAllocationsFound = 0;\n for (NodeGatewayStartedShards nodeShardState : shardState.getData().values()) {\n DiscoveryNode node = nodeShardState.getNode();\n@@ -287,31 +292,36 @@ protected static NodeShardsResult buildAllocationIdBasedNodeShardsResult(ShardRo\n }\n } else {\n final String finalAllocationId = allocationId;\n- logger.trace((Supplier<?>) () -> new ParameterizedMessage(\"[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id\", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException());\n- allocationId = null;\n+ if (nodeShardState.storeException() instanceof ShardLockObtainFailedException) {\n+ logger.trace((Supplier<?>) () -> new ParameterizedMessage(\"[{}] on node [{}] has allocation id [{}] but the store can not be opened as it's locked, treating as valid shard\", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException());\n+ } else {\n+ logger.trace((Supplier<?>) () -> new ParameterizedMessage(\"[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id\", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException());\n+ allocationId = null;\n+ }\n }\n \n if (allocationId != null) {\n+ assert nodeShardState.storeException() == null ||\n+ nodeShardState.storeException() instanceof ShardLockObtainFailedException :\n+ \"only allow store that can be opened or that throws a ShardLockObtainFailedException while being opened but got a store throwing \" + nodeShardState.storeException();\n numberOfAllocationsFound++;\n- if (inSyncAllocationIds.contains(allocationId)) {\n- 
if (nodeShardState.primary()) {\n- matchingNodeShardStates.addFirst(nodeShardState);\n- } else {\n- matchingNodeShardStates.addLast(nodeShardState);\n- }\n- } else if (matchAnyShard) {\n- if (nodeShardState.primary()) {\n- nonMatchingNodeShardStates.addFirst(nodeShardState);\n- } else {\n- nonMatchingNodeShardStates.addLast(nodeShardState);\n- }\n+ if (matchAnyShard || inSyncAllocationIds.contains(nodeShardState.allocationId())) {\n+ nodeShardStates.add(nodeShardState);\n }\n }\n }\n \n- List<NodeGatewayStartedShards> nodeShardStates = new ArrayList<>();\n- nodeShardStates.addAll(matchingNodeShardStates);\n- nodeShardStates.addAll(nonMatchingNodeShardStates);\n+ final Comparator<NodeGatewayStartedShards> comparator; // allocation preference\n+ if (matchAnyShard) {\n+ // prefer shards with matching allocation ids\n+ Comparator<NodeGatewayStartedShards> matchingAllocationsFirst = Comparator.comparing(\n+ (NodeGatewayStartedShards state) -> inSyncAllocationIds.contains(state.allocationId())).reversed();\n+ comparator = matchingAllocationsFirst.thenComparing(NO_STORE_EXCEPTION_FIRST_COMPARATOR).thenComparing(PRIMARY_FIRST_COMPARATOR);\n+ } else {\n+ comparator = NO_STORE_EXCEPTION_FIRST_COMPARATOR.thenComparing(PRIMARY_FIRST_COMPARATOR);\n+ }\n+\n+ nodeShardStates.sort(comparator);\n \n if (logger.isTraceEnabled()) {\n logger.trace(\"{} candidates for allocation: {}\", shard, nodeShardStates.stream().map(s -> s.getNode().getName()).collect(Collectors.joining(\", \")));\n@@ -412,10 +422,19 @@ static NodeShardsResult buildVersionBasedNodeShardsResult(ShardRouting shard, bo\n logger.trace(\"[{}] on node [{}] has allocation id [{}]\", shard, nodeShardState.getNode(), nodeShardState.allocationId());\n }\n } else {\n- final long finalVerison = version;\n- // when there is an store exception, we disregard the reported version and assign it as no version (same as shard does not exist)\n- logger.trace((Supplier<?>) () -> new ParameterizedMessage(\"[{}] on node [{}] has version [{}] but the store can not be opened, treating no version\", shard, nodeShardState.getNode(), finalVerison), nodeShardState.storeException());\n- version = ShardStateMetaData.NO_VERSION;\n+ final long finalVersion = version;\n+ if (nodeShardState.storeException() instanceof ShardLockObtainFailedException) {\n+ logger.trace((Supplier<?>) () -> new ParameterizedMessage(\"[{}] on node [{}] has version [{}] but the store can not be opened as it's locked, treating as valid shard\", shard, nodeShardState.getNode(), finalVersion), nodeShardState.storeException());\n+ if (nodeShardState.allocationId() != null) {\n+ version = Long.MAX_VALUE; // shard was already selected in a 5.x cluster as primary, prefer this shard copy again.\n+ } else {\n+ version = 0L; // treat as lowest version so that this shard is the least likely to be selected as primary\n+ }\n+ } else {\n+ // disregard the reported version and assign it as no version (same as shard does not exist)\n+ logger.trace((Supplier<?>) () -> new ParameterizedMessage(\"[{}] on node [{}] has version [{}] but the store can not be opened, treating no version\", shard, nodeShardState.getNode(), finalVersion), nodeShardState.storeException());\n+ version = ShardStateMetaData.NO_VERSION;\n+ }\n }\n \n if (version != ShardStateMetaData.NO_VERSION) {", "filename": "core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java", "status": "modified" }, { "diff": "@@ -414,15 +414,12 @@ public static boolean canOpenIndex(Logger logger, Path indexLocation, ShardId sh\n * segment infos and 
possible corruption markers. If the index can not\n * be opened, an exception is thrown\n */\n- public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker, Logger logger) throws IOException {\n+ public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker, Logger logger) throws IOException, ShardLockObtainFailedException {\n try (ShardLock lock = shardLocker.lock(shardId, TimeUnit.SECONDS.toMillis(5));\n Directory dir = new SimpleFSDirectory(indexLocation)) {\n failIfCorrupted(dir, shardId);\n SegmentInfos segInfo = Lucene.readSegmentInfos(dir);\n logger.trace(\"{} loaded segment info [{}]\", shardId, segInfo);\n- } catch (ShardLockObtainFailedException ex) {\n- logger.error((Supplier<?>) () -> new ParameterizedMessage(\"{} unable to acquire shard lock\", shardId), ex);\n- throw new IOException(ex);\n }\n }\n ", "filename": "core/src/main/java/org/elasticsearch/index/store/Store.java", "status": "modified" }, { "diff": "@@ -46,6 +46,7 @@\n import org.elasticsearch.common.UUIDs;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.util.set.Sets;\n+import org.elasticsearch.env.ShardLockObtainFailedException;\n import org.elasticsearch.index.shard.ShardId;\n import org.elasticsearch.index.shard.ShardStateMetaData;\n import org.elasticsearch.snapshots.Snapshot;\n@@ -174,6 +175,69 @@ public void testStoreException() {\n assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);\n }\n \n+ /**\n+ * Tests that when the node returns a ShardLockObtainFailedException, it will be considered as a valid shard copy\n+ */\n+ public void testShardLockObtainFailedException() {\n+ final RoutingAllocation allocation;\n+ boolean useAllocationIds = randomBoolean();\n+ if (useAllocationIds) {\n+ allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED,\n+ randomFrom(Version.V_2_0_0, Version.CURRENT), \"allocId1\");\n+ testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, \"allocId1\", randomBoolean(),\n+ new ShardLockObtainFailedException(shardId, \"test\"));\n+ } else {\n+ allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, Version.V_2_1_1);\n+ testAllocator.addData(node1, 3, null, randomBoolean(), new ShardLockObtainFailedException(shardId, \"test\"));\n+ }\n+ testAllocator.allocateUnassigned(allocation);\n+ assertThat(allocation.routingNodesChanged(), equalTo(true));\n+ assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));\n+ assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));\n+ assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.getId()));\n+ if (useAllocationIds) {\n+ // check that allocation id is reused\n+ assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), equalTo(\"allocId1\"));\n+ }\n+ assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);\n+ }\n+\n+ /**\n+ * Tests that when one node returns a ShardLockObtainFailedException and another properly loads the store, it will\n+ * select the second node as target\n+ */\n+ public void testShardLockObtainFailedExceptionPreferOtherValidCopies() {\n+ final RoutingAllocation allocation;\n+ boolean useAllocationIds = randomBoolean();\n+ String allocId1 = randomAsciiOfLength(10);\n+ String allocId2 = 
randomAsciiOfLength(10);\n+ if (useAllocationIds) {\n+ allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED,\n+ randomFrom(Version.V_2_0_0, Version.CURRENT), allocId1, allocId2);\n+ testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, allocId1, randomBoolean(),\n+ new ShardLockObtainFailedException(shardId, \"test\"));\n+ testAllocator.addData(node2, ShardStateMetaData.NO_VERSION, allocId2, randomBoolean(), null);\n+ } else {\n+ allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, Version.V_2_1_1);\n+ testAllocator.addData(node1, 3, null, randomBoolean(), new ShardLockObtainFailedException(shardId, \"test\"));\n+ if (randomBoolean()) {\n+ testAllocator.addData(node2, randomIntBetween(2, 4), null, randomBoolean(), null);\n+ } else {\n+ testAllocator.addData(node2, ShardStateMetaData.NO_VERSION, \"some alloc id\", randomBoolean(), null);\n+ }\n+ }\n+ testAllocator.allocateUnassigned(allocation);\n+ assertThat(allocation.routingNodesChanged(), equalTo(true));\n+ assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));\n+ assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));\n+ assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.getId()));\n+ if (useAllocationIds) {\n+ // check that allocation id is reused\n+ assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), equalTo(allocId2));\n+ }\n+ assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);\n+ }\n+\n /**\n * Tests that when there is a node to allocate the shard to, it will be allocated to it.\n */", "filename": "core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java", "status": "modified" } ] }
{ "body": "<!--\r\nGitHub is reserved for bug reports and feature requests. The best place\r\nto ask a general question is at the Elastic Discourse forums at\r\nhttps://discuss.elastic.co. If you are in fact posting a bug report or\r\na feature request, please include one and only one of the below blocks\r\nin your new issue. Note that whether you're filing a bug report or a\r\nfeature request, ensure that your submission is for an\r\n[OS that we support](https://www.elastic.co/support/matrix#show_os).\r\nBug reports on an OS that we do not support or feature requests\r\nspecific to an OS that we do not support will be closed.\r\n-->\r\n\r\n<!--\r\nIf you are filing a bug report, please remove the below feature\r\nrequest block and provide responses for all of the below items.\r\n-->\r\n\r\n**Elasticsearch version**: 5.0\r\n\r\n**Plugins installed**: N/A\r\n\r\n**JVM version**: 1.8u112\r\n\r\n**OS version**: Windows Server 2012\r\n\r\n**Description of the problem including expected versus actual behavior**: Highlighting dynamic fields which are stored as keyword is not working as expected. When searching over _all I would expect highlighting to occur on all fields of the result when using `require_field_match: false`. Example query can be found here:\r\n\r\nhttp://pastebin.com/sAGczFhU\r\n\r\nMy use case is that nearly every search is done for an exact value or over _all. I could get extreme performance gains by shifting dynamically created fields to Keyword, and then if full-text search is needed, defining them explicitly in the mapping. However, search highlighting is still an important part of our workflow.\r\n\r\nDo I have a misunderstanding of how highlighting works? My interpretation is that\r\n1. We search over _all\r\n2. Hits are selected for highlighting as a postprocessing step\r\n3. Each field is matched against the original search query if require_field_match: false is set to true and the highlighting_query option is unused.\r\n\r\n**Steps to reproduce**:\r\n 1. Create a new index with a mapping that sets dynamic fields to type Keyword\r\n 2. Add three new documents with `\"message\": \"Kostya Test\"`\r\n 3. Run the previous query (fixing the date range)\r\n\r\nExpected: There should be highlighted search text extracted from the _source fields that are loaded at highlight-time\r\nActual: No text is highlighted\r\n\r\nThe same can be repeated where the search is an exact match for the field, and no highlighting is done then either.\r\n", "comments": [ { "body": "Hi @nostrebor,\nPlease ask questions like these on the discussion forum: https://discuss.elastic.co/\nWe reserve Github for issues and feature requests.\nRegarding your issue each field is highlighted using its own analyzer. Since you're using a keyword analyzer the only query that can be highlighted would be the entire value of the field. So it would only work with the query \"Kostya Test\".\n", "created_at": "2016-11-17T20:59:22Z" }, { "body": "I thought that might be the case so I tried it. 
As I mentioned in my original post, the same can be repeated where the search is an exact match for the field, and no highlighting is done then either.\n", "created_at": "2016-11-17T21:23:45Z" }, { "body": "Ok now I understand the problem @nostrebor \nThis simple recreation exhibits the problem:\n\n```\nPUT t\n{\n \"mappings\": {\n \"t\": {\n \"properties\": {\n \"message\": {\n \"type\": \"keyword\",\n \"store\": true\n }\n }\n }\n }\n}\n\nPUT t/t/1\n{\n \"message\": \"foo\"\n}\n\nGET _search\n{\n \"size\": 1,\n \"stored_fields\": \"message\",\n \"query\": {\n \"match\": {\n \"message\": \"foo\"\n }\n },\n \"highlight\": {\n \"fields\": {\n \"message\": {\n \"type\": \"plain\",\n \"no_match_size\": 10\n }\n }\n }\n}\n```\n\n... returns:\n\n```\n{\n \"took\": 1,\n \"timed_out\": false,\n \"_shards\": {\n \"total\": 5,\n \"successful\": 5,\n \"failed\": 0\n },\n \"hits\": {\n \"total\": 1,\n \"max_score\": 0.2876821,\n \"hits\": [\n {\n \"_index\": \"t\",\n \"_type\": \"t\",\n \"_id\": \"1\",\n \"_score\": 0.2876821,\n \"fields\": {\n \"message\": [\n \"foo\"\n ]\n },\n \"highlight\": {\n \"message\": [\n \"[66 6f 6f]\"\n ]\n }\n }\n ]\n }\n}\n```\n\nThis is due to the fact that the keyword field is stored as a binary field. The highlighter does not convert the binary value into a valid string. \nI'll work on a fix. \nAs a workaround you can use a text field with a keyword analyzer:\n\n```\n\"message\": {\n \"type\": \"text\",\n \"analyzer\": \"keyword\",\n \"store\": true\n}\n```\n\n... or you can force highlighting on source:\nhttps://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-highlighting.html#_force_highlighting_on_source\n", "created_at": "2016-11-18T08:32:04Z" }, { "body": "For posterity, there was a separate issue of searching vs all converting the string to lowercase which would not match the keyword data iirc, which has an existing flag already. Thanks for looking into the other issue though!\n", "created_at": "2016-11-18T09:31:42Z" } ], "number": 21636, "title": "Highlighting does not work when all fields are type keyword" }
{ "body": "The highlighter converts stored keyword fields using toString().\r\nSince the keyword fields are stored as utf8 bytes the conversion is broken.\r\nThis change uses BytesRef.utf8toString() to convert the field value in a valid string.\r\n\r\nFixes #21636", "number": 21645, "review_comments": [ { "body": "would it work if we called MappedFieldType.valueForDisplay instead? I'm concerned some fields are stored as bytesrefs too but do not represent utf8 strings, like ip addresses\n", "created_at": "2016-11-18T10:48:43Z" }, { "body": "I agree that it's better to call valueForDisplay (I'll change the PR) though we are protected here since the highlighting can be done only on text or keyword fields.\n", "created_at": "2016-11-18T10:57:08Z" }, { "body": "Should we just do `String text = mapper.fieldType().valueForDisplay(textToHighlight).toString();` all the time? This looks correct to me and is more future-proof? If that does not work, feel free to merge the PR as-is.\n", "created_at": "2016-11-18T16:39:48Z" }, { "body": "The type of textToHighlight can be different whether it comes from the _source or from the stored fields. Calling valueForDisplay on it when it comes from _source would not work.", "created_at": "2016-11-21T09:29:13Z" } ], "title": "Fix highlighting on a stored keyword field" }
{ "commits": [ { "message": "Fix highlighting on a stored keyword field\n\nThe highlighter converts stored keyword fields using toString().\nSince the keyword fields are stored as utf8 bytes the conversion is broken.\nThis change uses BytesRef.utf8toString() to convert the field value in a valid string.\n\nFixes #21636" }, { "message": "Replace BytesRef#utf8ToString with MappedFieldType#valueForDisplay" } ], "files": [ { "diff": "@@ -31,6 +31,7 @@\n import org.apache.lucene.search.highlight.SimpleHTMLFormatter;\n import org.apache.lucene.search.highlight.SimpleSpanFragmenter;\n import org.apache.lucene.search.highlight.TextFragment;\n+import org.apache.lucene.util.BytesRef;\n import org.apache.lucene.util.BytesRefHash;\n import org.apache.lucene.util.CollectionUtil;\n import org.elasticsearch.ExceptionsHelper;\n@@ -106,7 +107,12 @@ public HighlightField highlight(HighlighterContext highlighterContext) {\n textsToHighlight = HighlightUtils.loadFieldValues(field, mapper, context, hitContext);\n \n for (Object textToHighlight : textsToHighlight) {\n- String text = textToHighlight.toString();\n+ String text;\n+ if (textToHighlight instanceof BytesRef) {\n+ text = mapper.fieldType().valueForDisplay(textToHighlight).toString();\n+ } else {\n+ text = textToHighlight.toString();\n+ }\n \n try (TokenStream tokenStream = analyzer.tokenStream(mapper.fieldType().name(), text)) {\n if (!tokenStream.hasAttribute(CharTermAttribute.class) || !tokenStream.hasAttribute(OffsetAttribute.class)) {", "filename": "core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java", "status": "modified" }, { "diff": "@@ -24,7 +24,6 @@\n import org.elasticsearch.action.index.IndexRequestBuilder;\n import org.elasticsearch.action.search.SearchRequestBuilder;\n import org.elasticsearch.action.search.SearchResponse;\n-import org.elasticsearch.action.search.SearchType;\n import org.elasticsearch.action.support.WriteRequest;\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.geo.GeoPoint;\n@@ -41,7 +40,6 @@\n import org.elasticsearch.index.query.QueryBuilders;\n import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;\n import org.elasticsearch.index.search.MatchQuery;\n-import org.elasticsearch.indices.IndicesRequestCache;\n import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.rest.RestStatus;\n import org.elasticsearch.search.SearchHit;\n@@ -106,6 +104,29 @@ protected Collection<Class<? 
extends Plugin>> nodePlugins() {\n return Collections.singletonList(InternalSettingsPlugin.class);\n }\n \n+ public void testHighlightingWithStoredKeyword() throws IOException {\n+ XContentBuilder mappings = jsonBuilder();\n+ mappings.startObject();\n+ mappings.startObject(\"type\")\n+ .startObject(\"properties\")\n+ .startObject(\"text\")\n+ .field(\"type\", \"keyword\")\n+ .field(\"store\", true)\n+ .endObject()\n+ .endObject()\n+ .endObject();\n+ mappings.endObject();\n+ assertAcked(prepareCreate(\"test\")\n+ .addMapping(\"type\", mappings));\n+ client().prepareIndex(\"test\", \"type\", \"1\")\n+ .setSource(jsonBuilder().startObject().field(\"text\", \"foo\").endObject())\n+ .get();\n+ refresh();\n+ SearchResponse search = client().prepareSearch().setQuery(matchQuery(\"text\", \"foo\"))\n+ .highlighter(new HighlightBuilder().field(new Field(\"text\"))).get();\n+ assertHighlight(search, 0, \"text\", 0, equalTo(\"<em>foo</em>\"));\n+ }\n+\n public void testHighlightingWithWildcardName() throws IOException {\n // test the kibana case with * as fieldname that will try highlight all fields including meta fields\n XContentBuilder mappings = jsonBuilder();", "filename": "core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: 5.0.0/5.0.1 \r\nHi, I updated our ES from 2.4 to 5.0.X, and encountered this synonyms bug.\r\nthe solar format like following doesn't work anymore with dis_max/multi-match/cross_fields\r\n \"synonym\": {\r\n \"type\": \"synonym\",\r\n \"synonyms\": [\r\n \"1st,first\",\r\n \"2nd,second\"\r\n ]\r\n },\r\nfor more info, \"=>\" works, but not exactly what we want\r\n \"synonym\": {\r\n \"type\": \"synonym\",\r\n \"synonyms\": [\r\n \"1st=>first\",\r\n \"2nd=>second\"\r\n ]\r\n },\r\n\r\n**Steps to reproduce**:\r\nI am able to simplify our case and reproduce with clean ES 5.0.1 (by docker/elasticsearch image)\r\n\r\n1) create index and insert 2 sample records\r\n```\r\ncurl -XPUT localhost:9200/test -d '\r\n{\r\n \"settings\": {\r\n \"analysis\": {\r\n \"filter\": {\r\n \"synonym\": {\r\n \"type\": \"synonym\",\r\n \"synonyms\": [\r\n \"1st,first\",\r\n \"2nd,second\"\r\n ]\r\n },\r\n \"english_stemmer\": {\r\n \"type\": \"stemmer\",\r\n \"name\": \"english\"\r\n }\r\n },\r\n \"analyzer\": {\r\n \"english\": {\r\n \"tokenizer\": \"standard\",\r\n \"filter\": [\r\n \"lowercase\",\r\n \"stop\",\r\n \"english_stemmer\"\r\n ]\r\n },\r\n \"english_search\": {\r\n \"tokenizer\": \"standard\",\r\n \"filter\": [\r\n \"lowercase\",\r\n \"synonym\",\r\n \"stop\",\r\n \"english_stemmer\"\r\n ]\r\n }\r\n }\r\n }\r\n },\r\n \"mappings\": {\r\n \"product\": {\r\n \"_all\": {\r\n \"enabled\": false\r\n },\r\n \"properties\": {\r\n \"name\": {\r\n \"type\": \"text\",\r\n \"analyzer\": \"english\",\r\n \"search_analyzer\": \"english_search\"\r\n },\r\n \"desc\": {\r\n \"type\": \"text\",\r\n \"analyzer\": \"english\",\r\n \"search_analyzer\": \"english_search\"\r\n }\r\n }\r\n }\r\n }\r\n}\r\n'\r\n\r\ncurl -XPUT localhost:9200/test/product/1 -d ' { \"name\" : \"first shoe\", \"desc\": \"desc\" } '\r\ncurl -XPUT localhost:9200/test/product/2 -d ' { \"name\" : \"1st shoes\", \"desc\": \"desc\" } '\r\n```\r\n\r\n2) search with single term works as expected\r\n```\r\ncurl 'localhost:9200/test/_search?q=name:1st'\r\n```\r\n\r\n3) multimatch doesn't return result, this is to search '1st' in all fields as cross_fields.\r\n```\r\ncurl localhost:9200/test/_search -d '\r\n{ \"query\" : {\r\n \"bool\" : {\r\n \"must\" : [\r\n { \"dis_max\" : {\r\n \"queries\" : [\r\n {\r\n \"multi_match\" : {\r\n \"query\" : \"1st\",\r\n \"fields\" : [ \"name^1.0\", \"desc^1.0\"],\r\n \"type\": \"cross_fields\",\r\n \"operator\" : \"OR\" \r\n }\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n }\r\n} }\r\n'\r\n```\r\n\r\n4) if I replace synonym in setting to be \"1st=>first\", and recreate whole index, then the dis_max/multi_match works.\r\n\r\n\r\n\r\n", "comments": [], "number": 21633, "title": "ES 5.0.X synonyms bug with multi-match/cross_fields" }
{ "body": "This change fixes the cross_fields type of the multi_match query when synonyms are involved.\r\nSince 2.x the Lucene query parser creates SynonymQuery for words that appear at the same position.\r\nFor simple term query the CrossFieldsQueryBuilder expands the term to all requested fields and creates a BlendedTermQuery.\r\nThis change adds the same mechanism for SynonymQuery which otherwise are not expanded to all requested fields.\r\nAs a side note I wonder if we should not replace the BlendedTermQuery with the SynonymQuery. They have the same purpose and behave similarly.\r\n\r\nFixes #21633", "number": 21638, "review_comments": [], "title": "Fix cross_fields type on multi_match query with synonyms" }
{ "commits": [ { "message": "Fix cross_fields type on multi_match query with synonyms\n\nThis change fixes the cross_fields type of the multi_match query when synonyms are involved.\nSince 2.x the Lucene query parser creates SynonymQuery for words that appear at the same position.\nFor simple term query the CrossFieldsQueryBuilder expands the term to all requested fields and creates a BlendedTermQuery.\nThis change adds the same mechanism for SynonymQuery which otherwise are not expanded to all requested fields.\nAs a side note I wonder if we should not replace the BlendedTermQuery with the SynonymQuery. They have the same purpose and behave similarly.\n\nFixes #21633" }, { "message": "Fallback to SynonymQuery for blended terms on a single field" } ], "files": [ { "diff": "@@ -31,6 +31,7 @@\n import org.apache.lucene.search.MultiTermQuery;\n import org.apache.lucene.search.PhraseQuery;\n import org.apache.lucene.search.Query;\n+import org.apache.lucene.search.SynonymQuery;\n import org.apache.lucene.search.TermQuery;\n import org.apache.lucene.util.QueryBuilder;\n import org.elasticsearch.ElasticsearchException;\n@@ -302,6 +303,11 @@ protected Query newTermQuery(Term term) {\n return blendTermQuery(term, mapper);\n }\n \n+ @Override\n+ protected Query newSynonymQuery(Term[] terms) {\n+ return blendTermsQuery(terms, mapper);\n+ }\n+\n public Query createPhrasePrefixQuery(String field, String queryText, int phraseSlop, int maxExpansions) {\n final Query query = createFieldQuery(getAnalyzer(), Occur.MUST, field, queryText, true, phraseSlop);\n float boost = 1;\n@@ -358,6 +364,10 @@ public Query createCommonTermsQuery(String field, String queryText, Occur highFr\n }\n }\n \n+ protected Query blendTermsQuery(Term[] terms, MappedFieldType fieldType) {\n+ return new SynonymQuery(terms);\n+ }\n+\n protected Query blendTermQuery(Term term, MappedFieldType fieldType) {\n if (fuzziness != null) {\n if (fieldType != null) {", "filename": "core/src/main/java/org/elasticsearch/index/search/MatchQuery.java", "status": "modified" }, { "diff": "@@ -158,6 +158,10 @@ public Query blendTerm(Term term, MappedFieldType fieldType) {\n return MultiMatchQuery.super.blendTermQuery(term, fieldType);\n }\n \n+ public Query blendTerms(Term[] terms, MappedFieldType fieldType) {\n+ return MultiMatchQuery.super.blendTermsQuery(terms, fieldType);\n+ }\n+\n public Query termQuery(MappedFieldType fieldType, Object value) {\n return MultiMatchQuery.this.termQuery(fieldType, value, lenient);\n }\n@@ -223,6 +227,18 @@ public List<Query> buildGroupedQueries(MultiMatchQueryBuilder.Type type, Map<Str\n return queries.isEmpty() ? null : queries;\n }\n \n+ @Override\n+ public Query blendTerms(Term[] terms, MappedFieldType fieldType) {\n+ if (blendedFields == null || blendedFields.length == 1) {\n+ return super.blendTerms(terms, fieldType);\n+ }\n+ BytesRef[] values = new BytesRef[terms.length];\n+ for (int i = 0; i < terms.length; i++) {\n+ values[i] = terms[i].bytes();\n+ }\n+ return MultiMatchQuery.blendTerms(context, values, commonTermsCutoff, tieBreaker, blendedFields);\n+ }\n+\n @Override\n public Query blendTerm(Term term, MappedFieldType fieldType) {\n if (blendedFields == null) {\n@@ -243,44 +259,51 @@ public Query termQuery(MappedFieldType fieldType, Object value) {\n }\n \n static Query blendTerm(QueryShardContext context, BytesRef value, Float commonTermsCutoff, float tieBreaker,\n+ FieldAndFieldType... 
blendedFields) {\n+ return blendTerms(context, new BytesRef[] {value}, commonTermsCutoff, tieBreaker, blendedFields);\n+ }\n+\n+ static Query blendTerms(QueryShardContext context, BytesRef[] values, Float commonTermsCutoff, float tieBreaker,\n FieldAndFieldType... blendedFields) {\n List<Query> queries = new ArrayList<>();\n- Term[] terms = new Term[blendedFields.length];\n- float[] blendedBoost = new float[blendedFields.length];\n+ Term[] terms = new Term[blendedFields.length * values.length];\n+ float[] blendedBoost = new float[blendedFields.length * values.length];\n int i = 0;\n for (FieldAndFieldType ft : blendedFields) {\n- Query query;\n- try {\n- query = ft.fieldType.termQuery(value, context);\n- } catch (IllegalArgumentException e) {\n- // the query expects a certain class of values such as numbers\n- // of ip addresses and the value can't be parsed, so ignore this\n- // field\n- continue;\n- } catch (ElasticsearchParseException parseException) {\n- // date fields throw an ElasticsearchParseException with the\n- // underlying IAE as the cause, ignore this field if that is\n- // the case\n- if (parseException.getCause() instanceof IllegalArgumentException) {\n+ for (BytesRef term : values) {\n+ Query query;\n+ try {\n+ query = ft.fieldType.termQuery(term, context);\n+ } catch (IllegalArgumentException e) {\n+ // the query expects a certain class of values such as numbers\n+ // of ip addresses and the value can't be parsed, so ignore this\n+ // field\n continue;\n+ } catch (ElasticsearchParseException parseException) {\n+ // date fields throw an ElasticsearchParseException with the\n+ // underlying IAE as the cause, ignore this field if that is\n+ // the case\n+ if (parseException.getCause() instanceof IllegalArgumentException) {\n+ continue;\n+ }\n+ throw parseException;\n }\n- throw parseException;\n- }\n- float boost = ft.boost;\n- while (query instanceof BoostQuery) {\n- BoostQuery bq = (BoostQuery) query;\n- query = bq.getQuery();\n- boost *= bq.getBoost();\n- }\n- if (query.getClass() == TermQuery.class) {\n- terms[i] = ((TermQuery) query).getTerm();\n- blendedBoost[i] = boost;\n- i++;\n- } else {\n- if (boost != 1f) {\n- query = new BoostQuery(query, boost);\n+ float boost = ft.boost;\n+ while (query instanceof BoostQuery) {\n+ BoostQuery bq = (BoostQuery) query;\n+ query = bq.getQuery();\n+ boost *= bq.getBoost();\n+ }\n+ if (query.getClass() == TermQuery.class) {\n+ terms[i] = ((TermQuery) query).getTerm();\n+ blendedBoost[i] = boost;\n+ i++;\n+ } else {\n+ if (boost != 1f) {\n+ query = new BoostQuery(query, boost);\n+ }\n+ queries.add(query);\n }\n- queries.add(query);\n }\n }\n if (i > 0) {\n@@ -317,6 +340,14 @@ protected Query blendTermQuery(Term term, MappedFieldType fieldType) {\n return queryBuilder.blendTerm(term, fieldType);\n }\n \n+ @Override\n+ protected Query blendTermsQuery(Term[] terms, MappedFieldType fieldType) {\n+ if (queryBuilder == null) {\n+ return super.blendTermsQuery(terms, fieldType);\n+ }\n+ return queryBuilder.blendTerms(terms, fieldType);\n+ }\n+\n static final class FieldAndFieldType {\n final MappedFieldType fieldType;\n final float boost;", "filename": "core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java", "status": "modified" }, { "diff": "@@ -28,10 +28,12 @@\n import org.apache.lucene.search.DisjunctionMaxQuery;\n import org.apache.lucene.search.MatchAllDocsQuery;\n import org.apache.lucene.search.Query;\n+import org.apache.lucene.search.SynonymQuery;\n import org.apache.lucene.search.TermQuery;\n import 
org.apache.lucene.util.BytesRef;\n import org.elasticsearch.common.compress.CompressedXContent;\n import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;\n+import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.index.IndexService;\n import org.elasticsearch.index.engine.Engine;\n import org.elasticsearch.index.mapper.MapperService;\n@@ -55,18 +57,25 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase {\n \n @Before\n public void setup() throws IOException {\n- IndexService indexService = createIndex(\"test\");\n+ Settings settings = Settings.builder()\n+ .put(\"index.analysis.filter.syns.type\",\"synonym\")\n+ .putArray(\"index.analysis.filter.syns.synonyms\",\"quick,fast\")\n+ .put(\"index.analysis.analyzer.syns.tokenizer\",\"standard\")\n+ .put(\"index.analysis.analyzer.syns.filter\",\"syns\").build();\n+ IndexService indexService = createIndex(\"test\", settings);\n MapperService mapperService = indexService.mapperService();\n String mapping = \"{\\n\" +\n \" \\\"person\\\":{\\n\" +\n \" \\\"properties\\\":{\\n\" +\n \" \\\"name\\\":{\\n\" +\n \" \\\"properties\\\":{\\n\" +\n \" \\\"first\\\": {\\n\" +\n- \" \\\"type\\\":\\\"text\\\"\\n\" +\n+ \" \\\"type\\\":\\\"text\\\",\\n\" +\n+ \" \\\"analyzer\\\":\\\"syns\\\"\\n\" +\n \" },\" +\n \" \\\"last\\\": {\\n\" +\n- \" \\\"type\\\":\\\"text\\\"\\n\" +\n+ \" \\\"type\\\":\\\"text\\\",\\n\" +\n+ \" \\\"analyzer\\\":\\\"syns\\\"\\n\" +\n \" }\" +\n \" }\" +\n \" }\\n\" +\n@@ -176,4 +185,34 @@ public void testMultiMatchPrefixWithAllField() throws IOException {\n assertThat(parsedQuery, instanceOf(MultiPhrasePrefixQuery.class));\n assertThat(parsedQuery.toString(), equalTo(\"_all:\\\"foo*\\\"\"));\n }\n+\n+ public void testMultiMatchCrossFieldsWithSynonyms() throws IOException {\n+ QueryShardContext queryShardContext = indexService.newQueryShardContext(\n+ randomInt(20), null, () -> { throw new UnsupportedOperationException(); });\n+\n+ // check that synonym query is used for a single field\n+ Query parsedQuery =\n+ multiMatchQuery(\"quick\").field(\"name.first\")\n+ .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).toQuery(queryShardContext);\n+ Term[] terms = new Term[2];\n+ terms[0] = new Term(\"name.first\", \"quick\");\n+ terms[1] = new Term(\"name.first\", \"fast\");\n+ Query expectedQuery = new SynonymQuery(terms);\n+ assertThat(parsedQuery, equalTo(expectedQuery));\n+\n+ // check that blended term query is used for multiple fields\n+ parsedQuery =\n+ multiMatchQuery(\"quick\").field(\"name.first\").field(\"name.last\")\n+ .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).toQuery(queryShardContext);\n+ terms = new Term[4];\n+ terms[0] = new Term(\"name.first\", \"quick\");\n+ terms[1] = new Term(\"name.first\", \"fast\");\n+ terms[2] = new Term(\"name.last\", \"quick\");\n+ terms[3] = new Term(\"name.last\", \"fast\");\n+ float[] boosts = new float[4];\n+ Arrays.fill(boosts, 1.0f);\n+ expectedQuery = BlendedTermQuery.dismaxBlendedQuery(terms, boosts, 1.0f);\n+ assertThat(parsedQuery, equalTo(expectedQuery));\n+\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: 5.0.1\r\n\r\n**Plugins installed**: []\r\n\r\n**JVM version**: 1.8.0_45 but I've seen other versions, too\r\n\r\n**OS version**: OSX El Capitan but I've seen it on RHEL 6, too\r\n\r\n**Description of the problem including expected versus actual behavior**:\r\nI'm expecting templates with higher `order` to always override those with lower `order`. It doesn't seem to happen if one of them has a value of -2147483648 or close to it (tried with 46 and 47) **AND** another has a positive value (tried here with 2). It works if the negative number is less extreme (-2000000000 for example) **OR** if the second template has a higher, yet still negative `order` (-2 for example).\r\n\r\n**Steps to reproduce**:\r\n```\r\n# curl -XDELETE localhost:9200/_template/*\r\n# curl -XDELETE localhost:9200/_all\r\n\r\ncurl -XPUT localhost:9200/_template/higher_order -d '{\r\n \"template\": \"*\",\r\n \"order\": 2,\r\n \"mappings\": {\r\n \"message\": {\r\n \"properties\": {\r\n \"foo\": {\r\n \"type\": \"keyword\"\r\n }\r\n }\r\n }\r\n }\r\n}'\r\n\r\ncurl -XPUT localhost:9200/_template/lower_order -d '{\r\n \"order\" : -2147483648,\r\n \"template\" : \"*\",\r\n \"mappings\" : {\r\n \"message\" : {\r\n \"properties\" : {\r\n \"foo\" : {\r\n \"type\" : \"text\"\r\n }\r\n }\r\n }\r\n }\r\n }'\r\n\r\ncurl -XPUT localhost:9200/test\r\ncurl localhost:9200/test?pretty\r\n# \"foo\" : {\r\n# \"type\" : \"text\" # <----- should be keyword\r\n# }\r\n```\r\n\r\nThis affects Graylog users, see http://docs.graylog.org/en/2.1/pages/configuration/elasticsearch.html#custom-index-mappings", "comments": [ { "body": "It looks like this is due to `XContentMapValues.nodeIntegerValue()` calling `Number#intValue`, which truncates if the value is too large.\n", "created_at": "2016-11-17T16:17:29Z" }, { "body": "Thanks for the quick followup, @jpountz!\n", "created_at": "2016-11-18T06:52:09Z" } ], "number": 21622, "title": "Template with extreme negative order (e.g. -2147483648) overrides positive order template" }
{ "body": "The overflows were happening in two places, the parsing of the template that\r\nimplicitly truncates the `order` when its value does not fall into the `integer`\r\nrange, and the comparator that sorts templates in ascending order, since it\r\nreturns `order2-order1`, which might overflow.\r\n\r\nCloses #21622", "number": 21628, "review_comments": [ { "body": "I think it'd be a bit cleaner not to do this for Byte, Short, Integer, and Long.\n", "created_at": "2016-11-17T17:23:19Z" } ], "title": "Fix integer overflows when dealing with templates." }
{ "commits": [ { "message": "Fix integer overflows when dealing with templates.\n\nThe overflows were happening in two places, the parsing of the template that\nimplicitly truncates the `order` when its value does not fall into the `integer`\nrange, and the comparator that sorts templates in ascending order, since it\nreturns `order2-order1`, which might overflow.\n\nCloses #21622" }, { "message": "iter" } ], "files": [ { "diff": "@@ -474,12 +474,7 @@ private List<IndexTemplateMetaData> findTemplates(CreateIndexClusterStateUpdateR\n }\n }\n \n- CollectionUtil.timSort(templateMetadata, new Comparator<IndexTemplateMetaData>() {\n- @Override\n- public int compare(IndexTemplateMetaData o1, IndexTemplateMetaData o2) {\n- return o2.order() - o1.order();\n- }\n- });\n+ CollectionUtil.timSort(templateMetadata, Comparator.comparingInt(IndexTemplateMetaData::order).reversed());\n return templateMetadata;\n }\n ", "filename": "core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java", "status": "modified" }, { "diff": "@@ -21,6 +21,9 @@\n \n import org.apache.lucene.util.BytesRef;\n \n+import java.math.BigDecimal;\n+import java.math.BigInteger;\n+\n /**\n * A set of utilities for numbers.\n */\n@@ -178,4 +181,56 @@ public static boolean isValidDouble(double value) {\n }\n return true;\n }\n+\n+ /** Return the long that {@code n} stores, or throws an exception if the\n+ * stored value cannot be converted to a long that stores the exact same\n+ * value. */\n+ public static long toLongExact(Number n) {\n+ if (n instanceof Byte || n instanceof Short || n instanceof Integer\n+ || n instanceof Long) {\n+ return n.longValue();\n+ } else if (n instanceof Float || n instanceof Double) {\n+ double d = n.doubleValue();\n+ if (d != Math.round(d)) {\n+ throw new IllegalArgumentException(n + \" is not an integer value\");\n+ }\n+ return n.longValue();\n+ } else if (n instanceof BigDecimal) {\n+ return ((BigDecimal) n).toBigIntegerExact().longValueExact();\n+ } else if (n instanceof BigInteger) {\n+ return ((BigInteger) n).longValueExact();\n+ } else {\n+ throw new IllegalArgumentException(\"Cannot check whether [\" + n + \"] of class [\" + n.getClass().getName()\n+ + \"] is actually a long\");\n+ }\n+ }\n+\n+ /** Return the int that {@code n} stores, or throws an exception if the\n+ * stored value cannot be converted to an int that stores the exact same\n+ * value. */\n+ public static int toIntExact(Number n) {\n+ return Math.toIntExact(toLongExact(n));\n+ }\n+\n+ /** Return the short that {@code n} stores, or throws an exception if the\n+ * stored value cannot be converted to a short that stores the exact same\n+ * value. */\n+ public static short toShortExact(Number n) {\n+ long l = toLongExact(n);\n+ if (l != (short) l) {\n+ throw new ArithmeticException(\"short overflow: \" + l);\n+ }\n+ return (short) l;\n+ }\n+\n+ /** Return the byte that {@code n} stores, or throws an exception if the\n+ * stored value cannot be converted to a byte that stores the exact same\n+ * value. 
*/\n+ public static byte toByteExact(Number n) {\n+ long l = toLongExact(n);\n+ if (l != (byte) l) {\n+ throw new ArithmeticException(\"byte overflow: \" + l);\n+ }\n+ return (byte) l;\n+ }\n }", "filename": "core/src/main/java/org/elasticsearch/common/Numbers.java", "status": "modified" }, { "diff": "@@ -24,6 +24,7 @@\n import org.apache.lucene.util.automaton.CharacterRunAutomaton;\n import org.apache.lucene.util.automaton.Operations;\n import org.elasticsearch.ElasticsearchParseException;\n+import org.elasticsearch.common.Numbers;\n import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.regex.Regex;\n import org.elasticsearch.common.unit.TimeValue;\n@@ -357,7 +358,7 @@ public static double nodeDoubleValue(Object node) {\n \n public static int nodeIntegerValue(Object node) {\n if (node instanceof Number) {\n- return ((Number) node).intValue();\n+ return Numbers.toIntExact((Number) node);\n }\n return Integer.parseInt(node.toString());\n }\n@@ -366,10 +367,7 @@ public static int nodeIntegerValue(Object node, int defaultValue) {\n if (node == null) {\n return defaultValue;\n }\n- if (node instanceof Number) {\n- return ((Number) node).intValue();\n- }\n- return Integer.parseInt(node.toString());\n+ return nodeIntegerValue(node);\n }\n \n public static short nodeShortValue(Object node, short defaultValue) {\n@@ -381,7 +379,7 @@ public static short nodeShortValue(Object node, short defaultValue) {\n \n public static short nodeShortValue(Object node) {\n if (node instanceof Number) {\n- return ((Number) node).shortValue();\n+ return Numbers.toShortExact((Number) node);\n }\n return Short.parseShort(node.toString());\n }\n@@ -395,7 +393,7 @@ public static byte nodeByteValue(Object node, byte defaultValue) {\n \n public static byte nodeByteValue(Object node) {\n if (node instanceof Number) {\n- return ((Number) node).byteValue();\n+ return Numbers.toByteExact((Number) node);\n }\n return Byte.parseByte(node.toString());\n }\n@@ -409,7 +407,7 @@ public static long nodeLongValue(Object node, long defaultValue) {\n \n public static long nodeLongValue(Object node) {\n if (node instanceof Number) {\n- return ((Number) node).longValue();\n+ return Numbers.toLongExact((Number) node);\n }\n return Long.parseLong(node.toString());\n }", "filename": "core/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java", "status": "modified" }, { "diff": "@@ -0,0 +1,146 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.common;\n+\n+import org.elasticsearch.test.ESTestCase;\n+\n+import java.math.BigDecimal;\n+import java.math.BigInteger;\n+import java.util.concurrent.atomic.AtomicInteger;\n+\n+public class NumbersTests extends ESTestCase {\n+\n+ public void testToLongExact() {\n+ assertEquals(3L, Numbers.toLongExact(Long.valueOf(3L)));\n+ assertEquals(3L, Numbers.toLongExact(Integer.valueOf(3)));\n+ assertEquals(3L, Numbers.toLongExact(Short.valueOf((short) 3)));\n+ assertEquals(3L, Numbers.toLongExact(Byte.valueOf((byte) 3)));\n+ assertEquals(3L, Numbers.toLongExact(3d));\n+ assertEquals(3L, Numbers.toLongExact(3f));\n+ assertEquals(3L, Numbers.toLongExact(BigInteger.valueOf(3L)));\n+ assertEquals(3L, Numbers.toLongExact(BigDecimal.valueOf(3L)));\n+\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> Numbers.toLongExact(3.1d));\n+ assertEquals(\"3.1 is not an integer value\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class,\n+ () -> Numbers.toLongExact(Double.NaN));\n+ assertEquals(\"NaN is not an integer value\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class,\n+ () -> Numbers.toLongExact(Double.POSITIVE_INFINITY));\n+ assertEquals(\"Infinity is not an integer value\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class,\n+ () -> Numbers.toLongExact(3.1f));\n+ assertEquals(\"3.1 is not an integer value\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class,\n+ () -> Numbers.toLongExact(new AtomicInteger(3))); // not supported\n+ assertEquals(\"Cannot check whether [3] of class [java.util.concurrent.atomic.AtomicInteger] is actually a long\", e.getMessage());\n+ }\n+\n+ public void testToIntExact() {\n+ assertEquals(3L, Numbers.toIntExact(Long.valueOf(3L)));\n+ assertEquals(3L, Numbers.toIntExact(Integer.valueOf(3)));\n+ assertEquals(3L, Numbers.toIntExact(Short.valueOf((short) 3)));\n+ assertEquals(3L, Numbers.toIntExact(Byte.valueOf((byte) 3)));\n+ assertEquals(3L, Numbers.toIntExact(3d));\n+ assertEquals(3L, Numbers.toIntExact(3f));\n+ assertEquals(3L, Numbers.toIntExact(BigInteger.valueOf(3L)));\n+ assertEquals(3L, Numbers.toIntExact(BigDecimal.valueOf(3L)));\n+\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> Numbers.toIntExact(3.1d));\n+ assertEquals(\"3.1 is not an integer value\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class,\n+ () -> Numbers.toLongExact(Double.NaN));\n+ assertEquals(\"NaN is not an integer value\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class,\n+ () -> Numbers.toLongExact(Double.POSITIVE_INFINITY));\n+ assertEquals(\"Infinity is not an integer value\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class,\n+ () -> Numbers.toIntExact(3.1f));\n+ assertEquals(\"3.1 is not an integer value\", e.getMessage());\n+ ArithmeticException ae = expectThrows(ArithmeticException.class,\n+ () -> Numbers.toIntExact(1L << 40));\n+ assertEquals(\"integer overflow\", ae.getMessage());\n+ e = expectThrows(IllegalArgumentException.class,\n+ () -> Numbers.toIntExact(new AtomicInteger(3))); // not supported\n+ assertEquals(\"Cannot check whether [3] of class [java.util.concurrent.atomic.AtomicInteger] is actually a long\", e.getMessage());\n+ }\n+\n+ public void testToShortExact() {\n+ assertEquals(3L, Numbers.toShortExact(Long.valueOf(3L)));\n+ 
assertEquals(3L, Numbers.toShortExact(Integer.valueOf(3)));\n+ assertEquals(3L, Numbers.toShortExact(Short.valueOf((short) 3)));\n+ assertEquals(3L, Numbers.toShortExact(Byte.valueOf((byte) 3)));\n+ assertEquals(3L, Numbers.toShortExact(3d));\n+ assertEquals(3L, Numbers.toShortExact(3f));\n+ assertEquals(3L, Numbers.toShortExact(BigInteger.valueOf(3L)));\n+ assertEquals(3L, Numbers.toShortExact(BigDecimal.valueOf(3L)));\n+\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> Numbers.toShortExact(3.1d));\n+ assertEquals(\"3.1 is not an integer value\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class,\n+ () -> Numbers.toLongExact(Double.NaN));\n+ assertEquals(\"NaN is not an integer value\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class,\n+ () -> Numbers.toLongExact(Double.POSITIVE_INFINITY));\n+ assertEquals(\"Infinity is not an integer value\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class,\n+ () -> Numbers.toShortExact(3.1f));\n+ assertEquals(\"3.1 is not an integer value\", e.getMessage());\n+ ArithmeticException ae = expectThrows(ArithmeticException.class,\n+ () -> Numbers.toShortExact(100000));\n+ assertEquals(\"short overflow: \" + 100000, ae.getMessage());\n+ e = expectThrows(IllegalArgumentException.class,\n+ () -> Numbers.toShortExact(new AtomicInteger(3))); // not supported\n+ assertEquals(\"Cannot check whether [3] of class [java.util.concurrent.atomic.AtomicInteger] is actually a long\", e.getMessage());\n+ }\n+\n+ public void testToByteExact() {\n+ assertEquals(3L, Numbers.toByteExact(Long.valueOf(3L)));\n+ assertEquals(3L, Numbers.toByteExact(Integer.valueOf(3)));\n+ assertEquals(3L, Numbers.toByteExact(Short.valueOf((short) 3)));\n+ assertEquals(3L, Numbers.toByteExact(Byte.valueOf((byte) 3)));\n+ assertEquals(3L, Numbers.toByteExact(3d));\n+ assertEquals(3L, Numbers.toByteExact(3f));\n+ assertEquals(3L, Numbers.toByteExact(BigInteger.valueOf(3L)));\n+ assertEquals(3L, Numbers.toByteExact(BigDecimal.valueOf(3L)));\n+\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> Numbers.toByteExact(3.1d));\n+ assertEquals(\"3.1 is not an integer value\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class,\n+ () -> Numbers.toLongExact(Double.NaN));\n+ assertEquals(\"NaN is not an integer value\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class,\n+ () -> Numbers.toLongExact(Double.POSITIVE_INFINITY));\n+ assertEquals(\"Infinity is not an integer value\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class,\n+ () -> Numbers.toByteExact(3.1f));\n+ assertEquals(\"3.1 is not an integer value\", e.getMessage());\n+ ArithmeticException ae = expectThrows(ArithmeticException.class,\n+ () -> Numbers.toByteExact(300));\n+ assertEquals(\"byte overflow: \" + 300, ae.getMessage());\n+ e = expectThrows(IllegalArgumentException.class,\n+ () -> Numbers.toByteExact(new AtomicInteger(3))); // not supported\n+ assertEquals(\"Cannot check whether [3] of class [java.util.concurrent.atomic.AtomicInteger] is actually a long\", e.getMessage());\n+ }\n+}", "filename": "core/src/test/java/org/elasticsearch/common/NumbersTests.java", "status": "added" } ] }
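To make the two failure modes in the commit message above concrete, here is a minimal standalone sketch. It is illustrative only — the values are hypothetical and no Elasticsearch classes are involved — but it shows why the subtraction-based comparator misorders templates and how exact narrowing rejects an out-of-range `order` instead of silently truncating it.

```java
public class OverflowDemo {
    public static void main(String[] args) {
        final int order1 = Integer.MIN_VALUE; // order of one template
        final int order2 = 1;                 // order of another template

        // The old comparator returned order2 - order1; here that subtraction overflows
        // and yields the wrong sign, so the descending sort of templates is broken.
        System.out.println(order2 - order1); // -2147483647, negative although order2 > order1

        // Overflow-safe comparison; this is effectively what
        // Comparator.comparingInt(IndexTemplateMetaData::order).reversed() evaluates.
        System.out.println(Integer.compare(order2, order1)); // 1, the correct sign

        // Exact narrowing instead of silent truncation: an order that does not fit into
        // an int now fails loudly (Numbers.toIntExact delegates to Math.toIntExact).
        try {
            Math.toIntExact(3_000_000_000L);
        } catch (ArithmeticException e) {
            System.out.println(e.getMessage()); // "integer overflow"
        }
    }
}
```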
{ "body": "When a field defines a boost directly in the mapping:\r\n\r\n````\r\nPUT t\r\n{\r\n \"mappings\": {\r\n \"t\": {\r\n \"properties\": {\r\n \"textfield\": {\r\n \"type\": \"text\", \r\n \"boost\": 3\r\n }\r\n }\r\n }\r\n }\r\n}\r\n````\r\n\r\n... the match_phrase_prefix query does not consider the last term as prefix:\r\n\r\n````\r\nPOST t/t\r\n{\r\n \"textfield\" : \"foo bar\"\r\n}\r\n\r\nGET t/t/_search\r\n{\r\n \"query\" : {\r\n \"match_phrase_prefix\": {\r\n \"textfield\": \"f\"\r\n }\r\n }\r\n}\r\n`````\r\n\r\nThe above query does not return the hit.", "comments": [], "number": 21613, "title": "MatchPhrasePrefix does not work with fields boosted in the index mapping" }
{ "body": "This change fixes the match_phrase_prefix on fields that define a boost in their mapping.\r\n\r\nFixes #21613", "number": 21623, "review_comments": [ { "body": "should we then apply the boost again on the new query?\n", "created_at": "2016-11-17T14:28:15Z" }, { "body": "would be nice to have a unit test rather than an integration test for this\n", "created_at": "2016-11-17T14:29:14Z" } ], "title": "Fix match_phrase_prefix on boosted fields" }
{ "commits": [ { "message": "Fix match_phrase_prefix on boosted fields\n\nThis change fixes the match_phrase_prefix on fields that define a boost in their mapping.\n\nFixes #21613" } ], "files": [ { "diff": "@@ -25,6 +25,7 @@\n import org.apache.lucene.search.BooleanClause;\n import org.apache.lucene.search.BooleanClause.Occur;\n import org.apache.lucene.search.BooleanQuery;\n+import org.apache.lucene.search.BoostQuery;\n import org.apache.lucene.search.FuzzyQuery;\n import org.apache.lucene.search.MultiPhraseQuery;\n import org.apache.lucene.search.MultiTermQuery;\n@@ -303,31 +304,38 @@ protected Query newTermQuery(Term term) {\n \n public Query createPhrasePrefixQuery(String field, String queryText, int phraseSlop, int maxExpansions) {\n final Query query = createFieldQuery(getAnalyzer(), Occur.MUST, field, queryText, true, phraseSlop);\n+ float boost = 1;\n+ Query innerQuery = query;\n+ while (innerQuery instanceof BoostQuery) {\n+ BoostQuery bq = (BoostQuery) innerQuery;\n+ boost *= bq.getBoost();\n+ innerQuery = bq.getQuery();\n+ }\n final MultiPhrasePrefixQuery prefixQuery = new MultiPhrasePrefixQuery();\n prefixQuery.setMaxExpansions(maxExpansions);\n prefixQuery.setSlop(phraseSlop);\n- if (query instanceof PhraseQuery) {\n- PhraseQuery pq = (PhraseQuery)query;\n+ if (innerQuery instanceof PhraseQuery) {\n+ PhraseQuery pq = (PhraseQuery) innerQuery;\n Term[] terms = pq.getTerms();\n int[] positions = pq.getPositions();\n for (int i = 0; i < terms.length; i++) {\n prefixQuery.add(new Term[] {terms[i]}, positions[i]);\n }\n- return prefixQuery;\n- } else if (query instanceof MultiPhraseQuery) {\n- MultiPhraseQuery pq = (MultiPhraseQuery)query;\n+ return boost == 1 ? prefixQuery : new BoostQuery(prefixQuery, boost);\n+ } else if (innerQuery instanceof MultiPhraseQuery) {\n+ MultiPhraseQuery pq = (MultiPhraseQuery) innerQuery;\n Term[][] terms = pq.getTermArrays();\n int[] positions = pq.getPositions();\n for (int i = 0; i < terms.length; i++) {\n prefixQuery.add(terms[i], positions[i]);\n }\n- return prefixQuery;\n- } else if (query instanceof TermQuery) {\n- prefixQuery.add(((TermQuery) query).getTerm());\n- return prefixQuery;\n- } else if (query instanceof AllTermQuery) {\n- prefixQuery.add(((AllTermQuery) query).getTerm());\n- return prefixQuery;\n+ return boost == 1 ? prefixQuery : new BoostQuery(prefixQuery, boost);\n+ } else if (innerQuery instanceof TermQuery) {\n+ prefixQuery.add(((TermQuery) innerQuery).getTerm());\n+ return boost == 1 ? prefixQuery : new BoostQuery(prefixQuery, boost);\n+ } else if (innerQuery instanceof AllTermQuery) {\n+ prefixQuery.add(((AllTermQuery) innerQuery).getTerm());\n+ return boost == 1 ? 
prefixQuery : new BoostQuery(prefixQuery, boost);\n }\n return query;\n }", "filename": "core/src/main/java/org/elasticsearch/index/search/MatchQuery.java", "status": "modified" }, { "diff": "@@ -22,18 +22,23 @@\n import org.apache.lucene.queries.ExtendedCommonTermsQuery;\n import org.apache.lucene.search.BooleanClause;\n import org.apache.lucene.search.BooleanQuery;\n+import org.apache.lucene.search.BoostQuery;\n import org.apache.lucene.search.FuzzyQuery;\n import org.apache.lucene.search.MatchAllDocsQuery;\n import org.apache.lucene.search.MatchNoDocsQuery;\n import org.apache.lucene.search.PhraseQuery;\n import org.apache.lucene.search.PointRangeQuery;\n import org.apache.lucene.search.Query;\n import org.apache.lucene.search.TermQuery;\n+import org.elasticsearch.Version;\n+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;\n import org.elasticsearch.common.ParseFieldMatcher;\n import org.elasticsearch.common.ParsingException;\n+import org.elasticsearch.common.compress.CompressedXContent;\n import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;\n import org.elasticsearch.common.lucene.search.Queries;\n import org.elasticsearch.index.mapper.MappedFieldType;\n+import org.elasticsearch.index.mapper.MapperService;\n import org.elasticsearch.index.search.MatchQuery;\n import org.elasticsearch.index.search.MatchQuery.Type;\n import org.elasticsearch.index.search.MatchQuery.ZeroTermsQuery;\n@@ -458,4 +463,35 @@ public void testParseFailsWithTermsArray() throws Exception {\n \"}\";\n expectThrows(IllegalStateException.class, () -> parseQuery(json2));\n }\n+\n+ @Override\n+ protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {\n+ mapperService.merge(\"t_boost\", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(\"t_boost\",\n+ \"string_boost\", \"type=text,boost=4\").string()), MapperService.MergeReason.MAPPING_UPDATE, false);\n+ }\n+\n+ public void testMatchPhrasePrefixWithBoost() throws Exception {\n+ assumeTrue(\"test runs only when at least a type is registered\", getCurrentTypes().length > 0);\n+ QueryShardContext context = createShardContext();\n+ assumeTrue(\"test runs only when the index version is on or after V_5_0_0_alpha1\",\n+ context.indexVersionCreated().onOrAfter(Version.V_5_0_0_alpha1));\n+\n+ {\n+ // field boost is applied on a single term query\n+ MatchPhrasePrefixQueryBuilder builder = new MatchPhrasePrefixQueryBuilder(\"string_boost\", \"foo\");\n+ Query query = builder.toQuery(context);\n+ assertThat(query, instanceOf(BoostQuery.class));\n+ assertThat(((BoostQuery) query).getBoost(), equalTo(4f));\n+ Query innerQuery = ((BoostQuery) query).getQuery();\n+ assertThat(innerQuery, instanceOf(MultiPhrasePrefixQuery.class));\n+ }\n+\n+ {\n+ // field boost is ignored on phrase query\n+ MatchPhrasePrefixQueryBuilder builder = new MatchPhrasePrefixQueryBuilder(\"string_boost\", \"foo bar\");\n+ Query query = builder.toQuery(context);\n+ assertThat(query, instanceOf(MultiPhrasePrefixQuery.class));\n+ }\n+\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java", "status": "modified" } ] }
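The essential move in `createPhrasePrefixQuery` above — peel off any `BoostQuery` wrappers, rebuild the query, then re-apply the accumulated boost — can be shown against the plain Lucene API. This is a minimal sketch, not the Elasticsearch code: the method name is hypothetical and the identity "rebuild" step stands in for the real `MultiPhrasePrefixQuery` construction.

```java
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

public class UnwrapBoostDemo {

    /** Strip nested BoostQuery wrappers, rebuild, and re-apply the combined boost. */
    static Query rewritePreservingBoost(final Query query) {
        float boost = 1f;
        Query inner = query;
        while (inner instanceof BoostQuery) {
            final BoostQuery bq = (BoostQuery) inner;
            boost *= bq.getBoost();
            inner = bq.getQuery();
        }
        final Query rebuilt = inner; // the real code builds a MultiPhrasePrefixQuery from 'inner'
        return boost == 1f ? rebuilt : new BoostQuery(rebuilt, boost);
    }

    public static void main(String[] args) {
        // A field-level boost of 3 (as in the issue's mapping) shows up as a BoostQuery wrapper.
        final Query boosted = new BoostQuery(new TermQuery(new Term("textfield", "f")), 3f);
        System.out.println(rewritePreservingBoost(boosted)); // (textfield:f)^3.0
    }
}
```

Before this change, a boosted field produced a `BoostQuery` that matched none of the `instanceof` branches, so the method fell through to the final `return query` and the last term was never treated as a prefix — the symptom reported in #21613.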
{ "body": "When indices stats are requested via the node stats API, there is a\r\nlevel parameter to request stats at the index, node, or shards\r\nlevel. This parameter was not whitelisted when URL parsing was made\r\nstrict. This commit whitelists this parameter.\r\n\r\nAdditionally, there was some leniency in the parsing of this parameter\r\nthat has been removed.\r\n\r\nRelates #20722\r\n", "comments": [ { "body": "Thanks for reviewing @javanna; I've pushed commits in response to your comments.\n", "created_at": "2016-10-19T16:59:36Z" }, { "body": "Thanks @javanna.\n", "created_at": "2016-10-20T02:03:46Z" } ], "number": 21024, "title": "Whitelist node stats indices level parameter" }
{ "body": "A previous commit added strict level parsing for the node stats API, but\r\nthat commit missed adding the same for the indices stats API. This\r\ncommit rectifies this miss.\r\n\r\nNote that a garbage level parameter produces an empty stats response,\r\nthere is nothing gained by allowing this.\r\n\r\nRelates #21024", "number": 21577, "review_comments": [], "title": "Strict level parsing for indices stats" }
{ "commits": [ { "message": "Strict level parsing for indices stats\n\nA previous commit added strict level parsing for the node stats API, but\nthat commit missed adding the same for the indices stats API. This\ncommit rectifies this miss." } ], "files": [ { "diff": "@@ -152,12 +152,14 @@ public void writeTo(StreamOutput out) throws IOException {\n \n @Override\n public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {\n- String level = params.param(\"level\", \"indices\");\n- boolean isLevelValid = \"indices\".equalsIgnoreCase(level) || \"shards\".equalsIgnoreCase(level) || \"cluster\".equalsIgnoreCase(level);\n+ final String level = params.param(\"level\", \"indices\");\n+ final boolean isLevelValid =\n+ \"cluster\".equalsIgnoreCase(level) || \"indices\".equalsIgnoreCase(level) || \"shards\".equalsIgnoreCase(level);\n if (!isLevelValid) {\n- return builder;\n+ throw new IllegalArgumentException(\"level parameter must be one of [cluster] or [indices] or [shards] but was [\" + level + \"]\");\n }\n \n+\n builder.startObject(\"_all\");\n \n builder.startObject(\"primaries\");", "filename": "core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java", "status": "modified" }, { "diff": "@@ -0,0 +1,43 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.action.admin.indices.stats;\n+\n+import org.elasticsearch.common.xcontent.ToXContent;\n+import org.elasticsearch.test.ESTestCase;\n+\n+import java.util.Collections;\n+\n+import static org.hamcrest.CoreMatchers.containsString;\n+import static org.hamcrest.object.HasToString.hasToString;\n+\n+\n+public class IndicesStatsResponseTests extends ESTestCase {\n+\n+ public void testInvalidLevel() {\n+ final IndicesStatsResponse response = new IndicesStatsResponse();\n+ final String level = randomAsciiOfLength(16);\n+ final ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap(\"level\", level));\n+ final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> response.toXContent(null, params));\n+ assertThat(\n+ e,\n+ hasToString(containsString(\"level parameter must be one of [cluster] or [indices] or [shards] but was [\" + level + \"]\")));\n+ }\n+\n+}", "filename": "core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java", "status": "added" } ] }
{ "body": "Today when parsing a stats request, Elasticsearch silently ignores\r\nincorrect metrics. This commit removes lenient parsing of stats requests\r\nfor the nodes stats and indices stats APIs.\r\n\r\nRelates #20722, relates #21410", "comments": [ { "body": "Right now this is targeting 5.1.0 and 6.0.0, and I think it's worth discussing if we should include this in 5.0.x too.\n\nWhen I backport this, I will include a note in the migration docs.\n", "created_at": "2016-11-08T22:18:21Z" }, { "body": "This is a breaking change and should not be included until a major version (6.0+). Anyone actually tripping on this is not erroring today and their 5.x behavior isn't broken. They have the same behavior they have since 5.0 was deployed. This will change them to throwing errors. This doesn't help anyone, but can absolutely break some. It's a major version fix by any semantic version definition.\n", "created_at": "2016-11-08T22:31:54Z" }, { "body": "> This is a breaking change and should not be included until a major version (6.0+)\n\nI'd agree with this.\n", "created_at": "2016-11-08T23:06:10Z" }, { "body": "retest this please\n", "created_at": "2016-11-09T00:04:27Z" }, { "body": "> This is a breaking change and should not be included until a major version (6.0+). Anyone actually tripping on this is not erroring today and their 5.x behavior isn't broken.\n\nThey are broken now. Let's suppose that someone is hitting these endpoints intending to monitor certain stats, and they have a typo in their request: `/_nodes/stats/os,jvm,thread_poll`. Today they are silently missing the `thread_pool` stats. That's really bad, they are broken now, and this is due to a bug in Elasticsearch. This is not a breaking change like something that is working stops working, it's bug fix in that something that is silently ignoring bad requests will now tell users. And this _only_ impacts users that have bad requests, it does not impact anyone that is sending good requests. Instead of missing those stats, which they almost surely intend to be monitoring closely, they would now see:\n\n```\n{\n \"error\" : {\n \"root_cause\" : [\n {\n \"type\" : \"illegal_argument_exception\",\n \"reason\" : \"request [/_nodes/stats/os,jvm,thread_poll] contains unrecognized metric: [thread_poll] -> did you mean [thread_pool]?\"\n }\n ],\n \"type\" : \"illegal_argument_exception\",\n \"reason\" : \"request [/_nodes/stats/os,jvm,thread_poll] contains unrecognized metric: [thread_poll] -> did you mean [thread_pool]?\"\n },\n \"status\" : 400\n}\n```\n\nThis is helpful, not harmful, to those users. By getting this change in now, we can help them fix their requests instead of possibly waiting another year before discovering that they are broken. We think that the advantages (helping users fix their broken requests, not allowing additional users to accumulate broken requests) of making this change (again, only for users already sending bad requests) outweigh the disadvantages (causing requests that 200 _but miss data_ today to 400).\n", "created_at": "2016-11-09T00:19:00Z" }, { "body": "What what if they have `/_nodes/stats/os,jvm,thread_pool,other`? And they're working fine. Instead they'll get an error. It's a breaking change by definition. The fact that it isn't viewed as a breaking change is _exactly_ the problem. 
I don't know how to make it any clearer.\n\nThere's also an inherent assumption coming across here that the user has any control over actually fixing this, and isn't a monitoring system or a library away which they have to hope is updated ASAP while they remain broken. A user upgrading to a minor or \"bugfix\" release should not experience intentional breaks. It's that simple, and a major tenant of why semantic versioning exists.\n\nIf you break users with stuff like this, at least some will stop upgrading. That's how we end up with ancient versions all over the place. Versioning and break alignments with proper versioning is all for good reason.\n", "created_at": "2016-11-09T00:26:11Z" }, { "body": "> What what if they have `/_nodes/stats/os,jvm,thread_pool,other`? And they're working fine.\n\nThey think that `other` is doing something, and it's not.\n", "created_at": "2016-11-09T00:28:27Z" }, { "body": "@jasontedor That's a lot of assumptions. It may be vestigate, it may have never worked (most likely), but whatever downstream consumption may or may not care and they may be working 100% as intended. If it's been that was a long time that's _most likely_ the case. And if they weren't, and let's assume you're right. They went from 75% working to 100% broken. That's still bad, under the best of circumstances. It's a breaking change. It should be in a major version.\n\nIf I can't make you see this, that makes me sad, and I hope others on the elastic team recognize the importance of these things.\n", "created_at": "2016-11-09T00:36:34Z" }, { "body": "We hear you @NickCraver, and we appreciate your feedback. I think we are just coming at this from two different but reasonable sides. If we were talking about a change that breaks valid requests, I think we would be in complete agreement. Where we see things differently is for invalid requests.\n\n> They went from 75% working to 100% broken. That's still bad, under the best of circumstances. It's a breaking change. It should be in a major version.\n\nAs I said:\n\n> We think that the advantages (helping users fix their broken requests, not allowing additional users to accumulate broken requests) of making this change (again, only for users already sending bad requests) outweigh the disadvantages (causing requests that 200 but miss data today to 400).\n", "created_at": "2016-11-09T01:09:19Z" }, { "body": "retest this please\n", "created_at": "2016-11-15T15:32:34Z" }, { "body": "Thanks @dakrone.\n", "created_at": "2016-11-15T18:22:08Z" } ], "number": 21417, "title": "Remove lenient stats parsing" }
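The `did you mean [thread_pool]?` hint quoted in the discussion above comes from scoring the unrecognized metric against the known ones with Lucene's `LevensteinDistance`, the same helper the change uses. The sketch below is a simplified standalone version: the metric names and the 0.5 similarity cutoff are taken from the change, everything else is illustrative.

```java
import org.apache.lucene.search.spell.LevensteinDistance;

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;

public class DidYouMeanDemo {

    static final List<String> KNOWN_METRICS = Arrays.asList(
            "os", "jvm", "thread_pool", "fs", "transport", "http", "indices",
            "process", "breaker", "script", "discovery", "ingest");

    /** Return the most similar known metric, but only if it clears the 0.5 similarity cutoff. */
    static Optional<String> suggest(final String unknown) {
        final LevensteinDistance ld = new LevensteinDistance();
        return KNOWN_METRICS.stream()
                .filter(candidate -> ld.getDistance(unknown, candidate) > 0.5f)
                .max(Comparator.comparingDouble((String candidate) -> ld.getDistance(unknown, candidate)));
    }

    public static void main(String[] args) {
        System.out.println(suggest("thread_poll")); // Optional[thread_pool]
        System.out.println(suggest("garbage"));     // Optional.empty
    }
}
```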
{ "body": "Today when parsing a stats request, Elasticsearch silently ignores\r\nincorrect metrics. This commit removes lenient parsing of stats requests\r\nfor the nodes stats and indices stats APIs.\r\n\r\nThis pull request is a backport of #21417 to 5.x; notable changes in the\r\nbackport include f497c7da502234125e5e77dc6b43542f24dd9d93 which adds a\r\nnote to the migration docs and 322009f57c5bffef5d7b24fa4f8d8bc1076afe48\r\nwhich adds a backwards compatibility layer for percolate stats.\r\n\r\nRelates #20722, relates #21410, relates #21417", "number": 21576, "review_comments": [], "title": "Remove lenient stats parsing 5.x" }
{ "commits": [ { "message": "Remove lenient stats parsing\n\nToday when parsing a stats request, Elasticsearch silently ignores\nincorrect metrics. This commit removes lenient parsing of stats requests\nfor the nodes stats and indices stats APIs." }, { "message": "Add note to migration docs for strict stats\n\nThis commit adds a note to the migration docs regarding the strict\nparsing of stats metrics introduced in 5.1.0." }, { "message": "Fixed bad asciidoc ID in node stats" }, { "message": "Add BWC support for percolate metric\n\nThe percolate API was removed in 5.0.0, so support for obtaining\npercolate metrics from the nodes stats and index stats APIs was\nremoved. This commit adds a BWC support layer for requests for percolate\nmetrics on these endpoints so that requests for percolate stats that\nsucceeded in 2.x will not fail in 5.x. Note that this does not mean that\nstats are returned, just that the request will not see a 400 status\ncode." } ], "files": [ { "diff": "@@ -71,49 +71,58 @@ public final void handleRequest(RestRequest request, RestChannel channel, NodeCl\n request.unconsumedParams().stream().filter(p -> !responseParams().contains(p)).collect(Collectors.toCollection(TreeSet::new));\n \n // validate the non-response params\n- if (unconsumedParams.isEmpty() == false) {\n- String message = String.format(\n- Locale.ROOT,\n- \"request [%s] contains unrecognized parameter%s: \",\n- request.path(),\n- unconsumedParams.size() > 1 ? \"s\" : \"\");\n- boolean first = true;\n- for (final String unconsumedParam : unconsumedParams) {\n- final LevensteinDistance ld = new LevensteinDistance();\n- final List<Tuple<Float, String>> scoredParams = new ArrayList<>();\n- final Set<String> candidateParams = new HashSet<>();\n- candidateParams.addAll(request.consumedParams());\n- candidateParams.addAll(responseParams());\n- for (final String candidateParam : candidateParams) {\n- final float distance = ld.getDistance(unconsumedParam, candidateParam);\n- if (distance > 0.5f) {\n- scoredParams.add(new Tuple<>(distance, candidateParam));\n- }\n- }\n- CollectionUtil.timSort(scoredParams, (a, b) -> {\n- // sort by distance in reverse order, then parameter name for equal distances\n- int compare = a.v1().compareTo(b.v1());\n- if (compare != 0) return -compare;\n- else return a.v2().compareTo(b.v2());\n- });\n- if (first == false) {\n- message += \", \";\n- }\n- message += \"[\" + unconsumedParam + \"]\";\n- final List<String> keys = scoredParams.stream().map(Tuple::v2).collect(Collectors.toList());\n- if (keys.isEmpty() == false) {\n- message += \" -> did you mean \" + (keys.size() == 1 ? \"[\" + keys.get(0) + \"]\": \"any of \" + keys.toString()) + \"?\";\n- }\n- first = false;\n- }\n-\n- throw new IllegalArgumentException(message);\n+ if (!unconsumedParams.isEmpty()) {\n+ final Set<String> candidateParams = new HashSet<>();\n+ candidateParams.addAll(request.consumedParams());\n+ candidateParams.addAll(responseParams());\n+ throw new IllegalArgumentException(unrecognized(request, unconsumedParams, candidateParams, \"parameter\"));\n }\n \n // execute the action\n action.accept(channel);\n }\n \n+ protected final String unrecognized(\n+ final RestRequest request,\n+ final Set<String> invalids,\n+ final Set<String> candidates,\n+ final String detail) {\n+ String message = String.format(\n+ Locale.ROOT,\n+ \"request [%s] contains unrecognized %s%s: \",\n+ request.path(),\n+ detail,\n+ invalids.size() > 1 ? 
\"s\" : \"\");\n+ boolean first = true;\n+ for (final String invalid : invalids) {\n+ final LevensteinDistance ld = new LevensteinDistance();\n+ final List<Tuple<Float, String>> scoredParams = new ArrayList<>();\n+ for (final String candidate : candidates) {\n+ final float distance = ld.getDistance(invalid, candidate);\n+ if (distance > 0.5f) {\n+ scoredParams.add(new Tuple<>(distance, candidate));\n+ }\n+ }\n+ CollectionUtil.timSort(scoredParams, (a, b) -> {\n+ // sort by distance in reverse order, then parameter name for equal distances\n+ int compare = a.v1().compareTo(b.v1());\n+ if (compare != 0) return -compare;\n+ else return a.v2().compareTo(b.v2());\n+ });\n+ if (first == false) {\n+ message += \", \";\n+ }\n+ message += \"[\" + invalid + \"]\";\n+ final List<String> keys = scoredParams.stream().map(Tuple::v2).collect(Collectors.toList());\n+ if (keys.isEmpty() == false) {\n+ message += \" -> did you mean \" + (keys.size() == 1 ? \"[\" + keys.get(0) + \"]\" : \"any of \" + keys.toString()) + \"?\";\n+ }\n+ first = false;\n+ }\n+\n+ return message;\n+ }\n+\n /**\n * REST requests are handled by preparing a channel consumer that represents the execution of\n * the request against a channel.", "filename": "core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java", "status": "modified" }, { "diff": "@@ -55,7 +55,7 @@ public class RestNodesInfoAction extends BaseRestHandler {\n public RestNodesInfoAction(Settings settings, RestController controller, SettingsFilter settingsFilter) {\n super(settings);\n controller.registerHandler(GET, \"/_nodes\", this);\n- // this endpoint is used for metrics, not for nodeIds, like /_nodes/fs\n+ // this endpoint is used for metrics, not for node IDs, like /_nodes/fs\n controller.registerHandler(GET, \"/_nodes/{nodeId}\", this);\n controller.registerHandler(GET, \"/_nodes/{nodeId}/{metrics}\", this);\n // added this endpoint to be aligned with stats", "filename": "core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoAction.java", "status": "modified" }, { "diff": "@@ -33,7 +33,13 @@\n \n import java.io.IOException;\n import java.util.Collections;\n+import java.util.HashMap;\n+import java.util.HashSet;\n+import java.util.Locale;\n+import java.util.Map;\n import java.util.Set;\n+import java.util.TreeSet;\n+import java.util.function.Consumer;\n \n import static org.elasticsearch.rest.RestRequest.Method.GET;\n \n@@ -48,9 +54,38 @@ public RestNodesStatsAction(Settings settings, RestController controller) {\n controller.registerHandler(GET, \"/_nodes/stats/{metric}\", this);\n controller.registerHandler(GET, \"/_nodes/{nodeId}/stats/{metric}\", this);\n \n- controller.registerHandler(GET, \"/_nodes/stats/{metric}/{indexMetric}\", this);\n+ controller.registerHandler(GET, \"/_nodes/stats/{metric}/{index_metric}\", this);\n \n- controller.registerHandler(GET, \"/_nodes/{nodeId}/stats/{metric}/{indexMetric}\", this);\n+ controller.registerHandler(GET, \"/_nodes/{nodeId}/stats/{metric}/{index_metric}\", this);\n+ }\n+\n+ static final Map<String, Consumer<NodesStatsRequest>> METRICS;\n+\n+ static {\n+ final Map<String, Consumer<NodesStatsRequest>> metrics = new HashMap<>();\n+ metrics.put(\"os\", r -> r.os(true));\n+ metrics.put(\"jvm\", r -> r.jvm(true));\n+ metrics.put(\"thread_pool\", r -> r.threadPool(true));\n+ metrics.put(\"fs\", r -> r.fs(true));\n+ metrics.put(\"transport\", r -> r.transport(true));\n+ metrics.put(\"http\", r -> r.http(true));\n+ metrics.put(\"indices\", r -> r.indices(true));\n+ metrics.put(\"process\", r -> 
r.process(true));\n+ metrics.put(\"breaker\", r -> r.breaker(true));\n+ metrics.put(\"script\", r -> r.script(true));\n+ metrics.put(\"discovery\", r -> r.discovery(true));\n+ metrics.put(\"ingest\", r -> r.ingest(true));\n+ METRICS = Collections.unmodifiableMap(metrics);\n+ }\n+\n+ static final Map<String, Consumer<CommonStatsFlags>> FLAGS;\n+\n+ static {\n+ final Map<String, Consumer<CommonStatsFlags>> flags = new HashMap<>();\n+ for (final Flag flag : CommonStatsFlags.Flag.values()) {\n+ flags.put(flag.getRestName(), f -> f.set(flag, true));\n+ }\n+ FLAGS = Collections.unmodifiableMap(flags);\n }\n \n @Override\n@@ -62,35 +97,78 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC\n nodesStatsRequest.timeout(request.param(\"timeout\"));\n \n if (metrics.size() == 1 && metrics.contains(\"_all\")) {\n+ if (request.hasParam(\"index_metric\")) {\n+ throw new IllegalArgumentException(\n+ String.format(\n+ Locale.ROOT,\n+ \"request [%s] contains index metrics [%s] but all stats requested\",\n+ request.path(),\n+ request.param(\"index_metric\")));\n+ }\n nodesStatsRequest.all();\n nodesStatsRequest.indices(CommonStatsFlags.ALL);\n+ } else if (metrics.contains(\"_all\")) {\n+ throw new IllegalArgumentException(\n+ String.format(Locale.ROOT,\n+ \"request [%s] contains _all and individual metrics [%s]\",\n+ request.path(),\n+ request.param(\"metric\")));\n } else {\n nodesStatsRequest.clear();\n- nodesStatsRequest.os(metrics.contains(\"os\"));\n- nodesStatsRequest.jvm(metrics.contains(\"jvm\"));\n- nodesStatsRequest.threadPool(metrics.contains(\"thread_pool\"));\n- nodesStatsRequest.fs(metrics.contains(\"fs\"));\n- nodesStatsRequest.transport(metrics.contains(\"transport\"));\n- nodesStatsRequest.http(metrics.contains(\"http\"));\n- nodesStatsRequest.indices(metrics.contains(\"indices\"));\n- nodesStatsRequest.process(metrics.contains(\"process\"));\n- nodesStatsRequest.breaker(metrics.contains(\"breaker\"));\n- nodesStatsRequest.script(metrics.contains(\"script\"));\n- nodesStatsRequest.discovery(metrics.contains(\"discovery\"));\n- nodesStatsRequest.ingest(metrics.contains(\"ingest\"));\n+\n+ // use a sorted set so the unrecognized parameters appear in a reliable sorted order\n+ final Set<String> invalidMetrics = new TreeSet<>();\n+ for (final String metric : metrics) {\n+ final Consumer<NodesStatsRequest> handler = METRICS.get(metric);\n+ if (handler != null) {\n+ handler.accept(nodesStatsRequest);\n+ } else {\n+ invalidMetrics.add(metric);\n+ }\n+ }\n+\n+ if (!invalidMetrics.isEmpty()) {\n+ throw new IllegalArgumentException(unrecognized(request, invalidMetrics, METRICS.keySet(), \"metric\"));\n+ }\n \n // check for index specific metrics\n if (metrics.contains(\"indices\")) {\n- Set<String> indexMetrics = Strings.splitStringByCommaToSet(request.param(\"indexMetric\", \"_all\"));\n+ Set<String> indexMetrics = Strings.splitStringByCommaToSet(request.param(\"index_metric\", \"_all\"));\n if (indexMetrics.size() == 1 && indexMetrics.contains(\"_all\")) {\n nodesStatsRequest.indices(CommonStatsFlags.ALL);\n } else {\n CommonStatsFlags flags = new CommonStatsFlags();\n- for (Flag flag : CommonStatsFlags.Flag.values()) {\n- flags.set(flag, indexMetrics.contains(flag.getRestName()));\n+ flags.clear();\n+ // use a sorted set so the unrecognized parameters appear in a reliable sorted order\n+ final Set<String> invalidIndexMetrics = new TreeSet<>();\n+ for (final String indexMetric : indexMetrics) {\n+ final Consumer<CommonStatsFlags> handler = FLAGS.get(indexMetric);\n+ 
if (handler != null) {\n+ handler.accept(flags);\n+ } else {\n+ invalidIndexMetrics.add(indexMetric);\n+ }\n }\n+\n+ if (invalidIndexMetrics.contains(\"percolate\")) {\n+ deprecationLogger.deprecated(\n+ \"percolate stats are no longer available and requests for percolate stats will fail starting in 6.0.0\");\n+ invalidIndexMetrics.remove(\"percolate\");\n+ }\n+\n+ if (!invalidIndexMetrics.isEmpty()) {\n+ throw new IllegalArgumentException(unrecognized(request, invalidIndexMetrics, FLAGS.keySet(), \"index metric\"));\n+ }\n+\n nodesStatsRequest.indices(flags);\n }\n+ } else if (request.hasParam(\"index_metric\")) {\n+ throw new IllegalArgumentException(\n+ String.format(\n+ Locale.ROOT,\n+ \"request [%s] contains index metrics [%s] but indices stats not requested\",\n+ request.path(),\n+ request.param(\"index_metric\")));\n }\n }\n ", "filename": "core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java", "status": "modified" }, { "diff": "@@ -36,7 +36,13 @@\n \n import java.io.IOException;\n import java.util.Collections;\n+import java.util.HashMap;\n+import java.util.HashSet;\n+import java.util.Locale;\n+import java.util.Map;\n import java.util.Set;\n+import java.util.TreeSet;\n+import java.util.function.Consumer;\n \n import static org.elasticsearch.rest.RestRequest.Method.GET;\n import static org.elasticsearch.rest.RestStatus.OK;\n@@ -49,11 +55,34 @@ public RestIndicesStatsAction(Settings settings, RestController controller) {\n super(settings);\n controller.registerHandler(GET, \"/_stats\", this);\n controller.registerHandler(GET, \"/_stats/{metric}\", this);\n- controller.registerHandler(GET, \"/_stats/{metric}/{indexMetric}\", this);\n controller.registerHandler(GET, \"/{index}/_stats\", this);\n controller.registerHandler(GET, \"/{index}/_stats/{metric}\", this);\n }\n \n+ static Map<String, Consumer<IndicesStatsRequest>> METRICS;\n+\n+ static {\n+ final Map<String, Consumer<IndicesStatsRequest>> metrics = new HashMap<>();\n+ metrics.put(\"docs\", r -> r.docs(true));\n+ metrics.put(\"store\", r -> r.store(true));\n+ metrics.put(\"indexing\", r -> r.indexing(true));\n+ metrics.put(\"search\", r -> r.search(true));\n+ metrics.put(\"suggest\", r -> r.search(true));\n+ metrics.put(\"get\", r -> r.get(true));\n+ metrics.put(\"merge\", r -> r.merge(true));\n+ metrics.put(\"refresh\", r -> r.refresh(true));\n+ metrics.put(\"flush\", r -> r.flush(true));\n+ metrics.put(\"warmer\", r -> r.warmer(true));\n+ metrics.put(\"query_cache\", r -> r.queryCache(true));\n+ metrics.put(\"segments\", r -> r.segments(true));\n+ metrics.put(\"fielddata\", r -> r.fieldData(true));\n+ metrics.put(\"completion\", r -> r.completion(true));\n+ metrics.put(\"request_cache\", r -> r.requestCache(true));\n+ metrics.put(\"recovery\", r -> r.recovery(true));\n+ metrics.put(\"translog\", r -> r.translog(true));\n+ METRICS = Collections.unmodifiableMap(metrics);\n+ }\n+\n @Override\n public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {\n IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest();\n@@ -65,24 +94,34 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC\n // short cut, if no metrics have been specified in URI\n if (metrics.size() == 1 && metrics.contains(\"_all\")) {\n indicesStatsRequest.all();\n+ } else if (metrics.contains(\"_all\")) {\n+ throw new IllegalArgumentException(\n+ String.format(Locale.ROOT,\n+ \"request [%s] contains _all and individual metrics [%s]\",\n+ 
request.path(),\n+ request.param(\"metric\")));\n } else {\n indicesStatsRequest.clear();\n- indicesStatsRequest.docs(metrics.contains(\"docs\"));\n- indicesStatsRequest.store(metrics.contains(\"store\"));\n- indicesStatsRequest.indexing(metrics.contains(\"indexing\"));\n- indicesStatsRequest.search(metrics.contains(\"search\") || metrics.contains(\"suggest\"));\n- indicesStatsRequest.get(metrics.contains(\"get\"));\n- indicesStatsRequest.merge(metrics.contains(\"merge\"));\n- indicesStatsRequest.refresh(metrics.contains(\"refresh\"));\n- indicesStatsRequest.flush(metrics.contains(\"flush\"));\n- indicesStatsRequest.warmer(metrics.contains(\"warmer\"));\n- indicesStatsRequest.queryCache(metrics.contains(\"query_cache\"));\n- indicesStatsRequest.segments(metrics.contains(\"segments\"));\n- indicesStatsRequest.fieldData(metrics.contains(\"fielddata\"));\n- indicesStatsRequest.completion(metrics.contains(\"completion\"));\n- indicesStatsRequest.requestCache(metrics.contains(\"request_cache\"));\n- indicesStatsRequest.recovery(metrics.contains(\"recovery\"));\n- indicesStatsRequest.translog(metrics.contains(\"translog\"));\n+ // use a sorted set so the unrecognized parameters appear in a reliable sorted order\n+ final Set<String> invalidMetrics = new TreeSet<>();\n+ for (final String metric : metrics) {\n+ final Consumer<IndicesStatsRequest> consumer = METRICS.get(metric);\n+ if (consumer != null) {\n+ consumer.accept(indicesStatsRequest);\n+ } else {\n+ invalidMetrics.add(metric);\n+ }\n+ }\n+\n+ if (invalidMetrics.contains(\"percolate\")) {\n+ deprecationLogger.deprecated(\n+ \"percolate stats are no longer available and requests for percolate stats will fail starting in 6.0.0\");\n+ invalidMetrics.remove(\"percolate\");\n+ }\n+\n+ if (!invalidMetrics.isEmpty()) {\n+ throw new IllegalArgumentException(unrecognized(request, invalidMetrics, METRICS.keySet(), \"metric\"));\n+ }\n }\n \n if (request.hasParam(\"groups\")) {", "filename": "core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java", "status": "modified" }, { "diff": "@@ -0,0 +1,152 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.rest.action.admin.cluster;\n+\n+import org.elasticsearch.client.node.NodeClient;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.rest.RestController;\n+import org.elasticsearch.rest.RestRequest;\n+import org.elasticsearch.test.ESTestCase;\n+import org.elasticsearch.test.rest.FakeRestRequest;\n+\n+import java.io.IOException;\n+import java.util.Collections;\n+import java.util.HashMap;\n+import java.util.HashSet;\n+import java.util.Set;\n+\n+import static org.hamcrest.CoreMatchers.containsString;\n+import static org.hamcrest.object.HasToString.hasToString;\n+import static org.mockito.Mockito.mock;\n+\n+public class RestNodesStatsActionTests extends ESTestCase {\n+\n+ private RestNodesStatsAction action;\n+\n+ @Override\n+ public void setUp() throws Exception {\n+ super.setUp();\n+ action = new RestNodesStatsAction(Settings.EMPTY, new RestController(Settings.EMPTY, Collections.emptySet()));\n+ }\n+\n+ public void testUnrecognizedMetric() throws IOException {\n+ final HashMap<String, String> params = new HashMap<>();\n+ final String metric = randomAsciiOfLength(64);\n+ params.put(\"metric\", metric);\n+ final RestRequest request = new FakeRestRequest.Builder().withPath(\"/_nodes/stats\").withParams(params).build();\n+ final IllegalArgumentException e = expectThrows(\n+ IllegalArgumentException.class,\n+ () -> action.prepareRequest(request, mock(NodeClient.class)));\n+ assertThat(e, hasToString(containsString(\"request [/_nodes/stats] contains unrecognized metric: [\" + metric + \"]\")));\n+ }\n+\n+ public void testUnrecognizedMetricDidYouMean() throws IOException {\n+ final HashMap<String, String> params = new HashMap<>();\n+ params.put(\"metric\", \"os,transprot,unrecognized\");\n+ final RestRequest request = new FakeRestRequest.Builder().withPath(\"/_nodes/stats\").withParams(params).build();\n+ final IllegalArgumentException e = expectThrows(\n+ IllegalArgumentException.class,\n+ () -> action.prepareRequest(request, mock(NodeClient.class)));\n+ assertThat(\n+ e,\n+ hasToString(\n+ containsString(\n+ \"request [/_nodes/stats] contains unrecognized metrics: [transprot] -> did you mean [transport]?, [unrecognized]\")));\n+ }\n+\n+ public void testAllRequestWithOtherMetrics() throws IOException {\n+ final HashMap<String, String> params = new HashMap<>();\n+ final String metric = randomSubsetOf(1, RestNodesStatsAction.METRICS.keySet()).get(0);\n+ params.put(\"metric\", \"_all,\" + metric);\n+ final RestRequest request = new FakeRestRequest.Builder().withPath(\"/_nodes/stats\").withParams(params).build();\n+ final IllegalArgumentException e = expectThrows(\n+ IllegalArgumentException.class,\n+ () -> action.prepareRequest(request, mock(NodeClient.class)));\n+ assertThat(e, hasToString(containsString(\"request [/_nodes/stats] contains _all and individual metrics [_all,\" + metric + \"]\")));\n+ }\n+\n+ public void testUnrecognizedIndexMetric() {\n+ final HashMap<String, String> params = new HashMap<>();\n+ params.put(\"metric\", \"indices\");\n+ final String indexMetric = randomAsciiOfLength(64);\n+ params.put(\"index_metric\", indexMetric);\n+ final RestRequest request = new FakeRestRequest.Builder().withPath(\"/_nodes/stats\").withParams(params).build();\n+ final IllegalArgumentException e = expectThrows(\n+ IllegalArgumentException.class,\n+ () -> action.prepareRequest(request, mock(NodeClient.class)));\n+ assertThat(e, 
hasToString(containsString(\"request [/_nodes/stats] contains unrecognized index metric: [\" + indexMetric + \"]\")));\n+ }\n+\n+ public void testUnrecognizedIndexMetricDidYouMean() {\n+ final HashMap<String, String> params = new HashMap<>();\n+ params.put(\"metric\", \"indices\");\n+ params.put(\"index_metric\", \"indexing,stroe,unrecognized\");\n+ final RestRequest request = new FakeRestRequest.Builder().withPath(\"/_nodes/stats\").withParams(params).build();\n+ final IllegalArgumentException e = expectThrows(\n+ IllegalArgumentException.class,\n+ () -> action.prepareRequest(request, mock(NodeClient.class)));\n+ assertThat(\n+ e,\n+ hasToString(\n+ containsString(\n+ \"request [/_nodes/stats] contains unrecognized index metrics: [stroe] -> did you mean [store]?, [unrecognized]\")));\n+ }\n+\n+ public void testIndexMetricsWithPercolate() throws IOException {\n+ final HashMap<String, String> params = new HashMap<>();\n+ params.put(\"metric\", \"indices\");\n+ params.put(\"index_metric\", \"percolate\");\n+ final RestRequest request = new FakeRestRequest.Builder().withPath(\"/_nodes/stats\").withParams(params).build();\n+ action.prepareRequest(request, mock(NodeClient.class));\n+ }\n+\n+ public void testIndexMetricsRequestWithoutIndicesMetric() throws IOException {\n+ final HashMap<String, String> params = new HashMap<>();\n+ final Set<String> metrics = new HashSet<>(RestNodesStatsAction.METRICS.keySet());\n+ metrics.remove(\"indices\");\n+ params.put(\"metric\", randomSubsetOf(1, metrics).get(0));\n+ final String indexMetric = randomSubsetOf(1, RestNodesStatsAction.FLAGS.keySet()).get(0);\n+ params.put(\"index_metric\", indexMetric);\n+ final RestRequest request = new FakeRestRequest.Builder().withPath(\"/_nodes/stats\").withParams(params).build();\n+ final IllegalArgumentException e = expectThrows(\n+ IllegalArgumentException.class,\n+ () -> action.prepareRequest(request, mock(NodeClient.class)));\n+ assertThat(\n+ e,\n+ hasToString(\n+ containsString(\"request [/_nodes/stats] contains index metrics [\" + indexMetric + \"] but indices stats not requested\")));\n+ }\n+\n+ public void testIndexMetricsRequestOnAllRequest() throws IOException {\n+ final HashMap<String, String> params = new HashMap<>();\n+ params.put(\"metric\", \"_all\");\n+ final String indexMetric = randomSubsetOf(1, RestNodesStatsAction.FLAGS.keySet()).get(0);\n+ params.put(\"index_metric\", indexMetric);\n+ final RestRequest request = new FakeRestRequest.Builder().withPath(\"/_nodes/stats\").withParams(params).build();\n+ final IllegalArgumentException e = expectThrows(\n+ IllegalArgumentException.class,\n+ () -> action.prepareRequest(request, mock(NodeClient.class)));\n+ assertThat(\n+ e,\n+ hasToString(\n+ containsString(\"request [/_nodes/stats] contains index metrics [\" + indexMetric + \"] but all stats requested\")));\n+ }\n+\n+}", "filename": "core/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsActionTests.java", "status": "added" }, { "diff": "@@ -0,0 +1,90 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. 
Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.rest.action.admin.indices;\n+\n+import org.elasticsearch.client.node.NodeClient;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.rest.RestController;\n+import org.elasticsearch.rest.RestRequest;\n+import org.elasticsearch.test.ESTestCase;\n+import org.elasticsearch.test.rest.FakeRestRequest;\n+\n+import java.io.IOException;\n+import java.util.Collections;\n+import java.util.HashMap;\n+\n+import static org.hamcrest.CoreMatchers.containsString;\n+import static org.hamcrest.object.HasToString.hasToString;\n+import static org.mockito.Mockito.mock;\n+\n+public class RestIndicesStatsActionTests extends ESTestCase {\n+\n+ private RestIndicesStatsAction action;\n+\n+ @Override\n+ public void setUp() throws Exception {\n+ super.setUp();\n+ action = new RestIndicesStatsAction(Settings.EMPTY, new RestController(Settings.EMPTY, Collections.emptySet()));\n+ }\n+\n+ public void testUnrecognizedMetric() throws IOException {\n+ final HashMap<String, String> params = new HashMap<>();\n+ final String metric = randomAsciiOfLength(64);\n+ params.put(\"metric\", metric);\n+ final RestRequest request = new FakeRestRequest.Builder().withPath(\"/_stats\").withParams(params).build();\n+ final IllegalArgumentException e = expectThrows(\n+ IllegalArgumentException.class,\n+ () -> action.prepareRequest(request, mock(NodeClient.class)));\n+ assertThat(e, hasToString(containsString(\"request [/_stats] contains unrecognized metric: [\" + metric + \"]\")));\n+ }\n+\n+ public void testUnrecognizedMetricDidYouMean() throws IOException {\n+ final HashMap<String, String> params = new HashMap<>();\n+ params.put(\"metric\", \"request_cache,fieldata,unrecognized\");\n+ final RestRequest request = new FakeRestRequest.Builder().withPath(\"/_stats\").withParams(params).build();\n+ final IllegalArgumentException e = expectThrows(\n+ IllegalArgumentException.class,\n+ () -> action.prepareRequest(request, mock(NodeClient.class)));\n+ assertThat(\n+ e,\n+ hasToString(\n+ containsString(\n+ \"request [/_stats] contains unrecognized metrics: [fieldata] -> did you mean [fielddata]?, [unrecognized]\")));\n+ }\n+\n+ public void testAllRequestWithOtherMetrics() throws IOException {\n+ final HashMap<String, String> params = new HashMap<>();\n+ final String metric = randomSubsetOf(1, RestIndicesStatsAction.METRICS.keySet()).get(0);\n+ params.put(\"metric\", \"_all,\" + metric);\n+ final RestRequest request = new FakeRestRequest.Builder().withPath(\"/_stats\").withParams(params).build();\n+ final IllegalArgumentException e = expectThrows(\n+ IllegalArgumentException.class,\n+ () -> action.prepareRequest(request, mock(NodeClient.class)));\n+ assertThat(e, hasToString(containsString(\"request [/_stats] contains _all and individual metrics [_all,\" + metric + \"]\")));\n+ }\n+\n+ public void testIndexMetricsWithPercolate() throws IOException {\n+ final HashMap<String, String> 
params = new HashMap<>();\n+ params.put(\"metric\", \"percolate\");\n+ final RestRequest request = new FakeRestRequest.Builder().withPath(\"/_stats\").withParams(params).build();\n+ action.prepareRequest(request, mock(NodeClient.class));\n+ }\n+\n+}", "filename": "core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsActionTests.java", "status": "added" }, { "diff": "@@ -65,12 +65,11 @@ of `indices`, `os`, `process`, `jvm`, `transport`, `http`,\n \n [source,js]\n --------------------------------------------------\n-# return indices and os\n-curl -XGET 'http://localhost:9200/_nodes/stats/os'\n+# return just indices\n+curl -XGET 'http://localhost:9200/_nodes/stats/indices'\n # return just os and process\n curl -XGET 'http://localhost:9200/_nodes/stats/os,process'\n-# specific type endpoint\n-curl -XGET 'http://localhost:9200/_nodes/stats/process'\n+# return just process for node with IP address 10.0.0.1\n curl -XGET 'http://localhost:9200/_nodes/10.0.0.1/stats/process'\n --------------------------------------------------\n \n@@ -280,27 +279,45 @@ the current running process:\n `process.mem.total_virtual_in_bytes`::\n \tSize in bytes of virtual memory that is guaranteed to be available to the running process\n \n-\n [float]\n-[[field-data]]\n-=== Field data statistics\n+[[node-indices-stats]]\n+=== Indices statistics\n \n-You can get information about field data memory usage on node\n-level or on index level.\n+You can get information about indices stats on node level or on index level.\n \n [source,js]\n --------------------------------------------------\n-# Node Stats\n-curl -XGET 'http://localhost:9200/_nodes/stats/indices/?fields=field1,field2&pretty'\n+# Node level\n+curl -XGET 'http://localhost:9200/_nodes/stats/indices/fielddata?fields=field1,field2&pretty'\n \n-# Indices Stat\n+# Index level\n curl -XGET 'http://localhost:9200/_stats/fielddata/?fields=field1,field2&pretty'\n \n # You can use wildcards for field names\n+curl -XGET 'http://localhost:9200/_nodes/stats/indices/fielddata?fields=field*&pretty'\n curl -XGET 'http://localhost:9200/_stats/fielddata/?fields=field*&pretty'\n-curl -XGET 'http://localhost:9200/_nodes/stats/indices/?fields=field*&pretty'\n --------------------------------------------------\n \n+Supported metrics are:\n+\n+* `completion`\n+* `docs`\n+* `fielddata`\n+* `flush`\n+* `get`\n+* `indexing`\n+* `merge`\n+* `query_cache`\n+* `recovery`\n+* `refresh`\n+* `request_cache`\n+* `search`\n+* `segments`\n+* `store`\n+* `suggest`\n+* `translog`\n+* `warmer`\n+\n [float]\n [[search-groups]]\n === Search groups", "filename": "docs/reference/cluster/nodes-stats.asciidoc", "status": "modified" }, { "diff": "@@ -74,7 +74,7 @@ the <<indices-stats,indices stats>> API:\n \n [source,sh]\n --------------------------------------------------\n-GET twitter/_stats/commit?level=shards\n+GET twitter/_stats?level=shards\n --------------------------------------------------\n // CONSOLE\n // TEST[s/^/PUT twitter\\n/]", "filename": "docs/reference/indices/flush.asciidoc", "status": "modified" }, { "diff": "@@ -41,3 +41,15 @@ Plugging in a `UnicastHostsProvider` for zen discovery is now pull based. Implem\n ==== ZenPing and MasterElectService pluggability removed\n \n These classes are no longer pluggable. 
Either implement your own discovery, or extend from ZenDiscovery and customize as necessary.\n+\n+[[breaking_51_other_api_changes]]\n+[float]\n+=== Other API changes\n+\n+==== Indices stats and node stats API unrecognized metrics\n+\n+The indices stats and node stats APIs allow querying Elasticsearch for a variety of metrics. Previous versions of\n+Elasticsearch would silently accept unrecognized metrics (e.g., typos like \"transprot\"). In 5.1.0, this is no longer\n+the case; unrecognized metrics will cause the request to fail. There is one exception to this which is the percolate\n+metric which was removed in 5.0.0 but requests for these will only produce a warning in the 5.x series starting with\n+5.1.0 and will fail like any other unrecognized metric in 6.0.0.", "filename": "docs/reference/migration/migrate_5_1.asciidoc", "status": "modified" }, { "diff": "@@ -100,3 +100,21 @@ setup:\n - is_false: indices.test1\n - is_true: indices.test2\n \n+---\n+\"Indices stats unrecognized parameter\":\n+ - do:\n+ indices.stats:\n+ metric: [ fieldata ]\n+ ignore: 400\n+\n+ - match: { status: 400 }\n+ - match: { error.type: illegal_argument_exception }\n+ - match: { error.reason: \"request [/_stats/fieldata] contains unrecognized metric: [fieldata] -> did you mean [fielddata]?\" }\n+\n+---\n+\"Indices stats warns on percolate\":\n+ - do:\n+ warnings:\n+ - 'percolate stats are no longer available and requests for percolate stats will fail starting in 6.0.0'\n+ indices.stats:\n+ metric: [ percolate ]", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/10_index.yaml", "status": "modified" }, { "diff": "@@ -20,3 +20,23 @@\n level: \"indices\"\n \n - is_true: nodes.$master.indices.indices\n+\n+---\n+\"Nodes stats unrecognized parameter\":\n+ - do:\n+ nodes.stats:\n+ metric: [ transprot ]\n+ ignore: 400\n+\n+ - match: { status: 400 }\n+ - match: { error.type: illegal_argument_exception }\n+ - match: { error.reason: \"request [/_nodes/stats/transprot] contains unrecognized metric: [transprot] -> did you mean [transport]?\" }\n+\n+---\n+\"Node stats warns on percolate\":\n+ - do:\n+ warnings:\n+ - 'percolate stats are no longer available and requests for percolate stats will fail starting in 6.0.0'\n+ nodes.stats:\n+ metric: [ indices ]\n+ index_metric: [ percolate ]", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yaml", "status": "modified" } ] }
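Two implementation details in the diff above are worth calling out: metrics are dispatched through a `Map<String, Consumer<...>>` so adding a metric is a one-line change, and unknown names are collected in a sorted set before failing so the error lists every typo at once. Here is a condensed standalone sketch of that pattern; the request class is a hypothetical stand-in, not `NodesStatsRequest`.

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.function.Consumer;

public class MetricDispatchDemo {

    /** Hypothetical stand-in for NodesStatsRequest: it only records which sections were enabled. */
    static class StatsRequest {
        final Set<String> enabled = new TreeSet<>();
        void enable(final String section) {
            enabled.add(section);
        }
    }

    /** Metric name to handler, mirroring the METRICS map in RestNodesStatsAction above. */
    static final Map<String, Consumer<StatsRequest>> METRICS;

    static {
        final Map<String, Consumer<StatsRequest>> metrics = new HashMap<>();
        metrics.put("os", r -> r.enable("os"));
        metrics.put("jvm", r -> r.enable("jvm"));
        metrics.put("thread_pool", r -> r.enable("thread_pool"));
        METRICS = Collections.unmodifiableMap(metrics);
    }

    static StatsRequest parse(final Set<String> requestedMetrics) {
        final StatsRequest request = new StatsRequest();
        final Set<String> invalid = new TreeSet<>(); // sorted, so the error message is stable
        for (final String metric : requestedMetrics) {
            final Consumer<StatsRequest> handler = METRICS.get(metric);
            if (handler != null) {
                handler.accept(request);
            } else {
                invalid.add(metric);
            }
        }
        if (!invalid.isEmpty()) {
            throw new IllegalArgumentException("unrecognized metrics: " + invalid);
        }
        return request;
    }

    public static void main(String[] args) {
        System.out.println(parse(new TreeSet<>(Arrays.asList("os", "jvm"))).enabled); // [jvm, os]
        parse(new TreeSet<>(Arrays.asList("os", "thread_poll"))); // throws IllegalArgumentException
    }
}
```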
{ "body": "```\n 2> REPRODUCE WITH: gradle :core:test -Dtests.seed=17AC5B1BB751F423 -Dtests.class=org.elasticsearch.common.rounding.TimeZoneRoundingTests -Dtests.method=\"testRoundingRandom\" -Dtests.security.manager=true -Dtests.locale=ko -Dtests.timezone=Indian/Comoro\nFAILURE 0.69s | TimeZoneRoundingTests.testRoundingRandom <<< FAILURES!\n > Throwable #1: java.lang.AssertionError: dateBetween should round down to roundedDate\n > Expected: Expected: 2024-10-27T00:00:00.000Z [1729987200000] \n > but: was \"2024-10-27T00:00:00.000-01:00 [1729990800000]\"\n > at __randomizedtesting.SeedInfo.seed([17AC5B1BB751F423:B8E2E3098394017E]:0)\n > at org.hamcrest.MatcherAssert.assertThat(MatcherAssert.java:20)\n > at org.elasticsearch.common.rounding.TimeZoneRoundingTests.assertInterval(TimeZoneRoundingTests.java:541)\n > at org.elasticsearch.common.rounding.TimeZoneRoundingTests.testRoundingRandom(TimeZoneRoundingTests.java:195)\n > at java.lang.Thread.run(Thread.java:745)\n 2> NOTE: leaving temporary files on disk at: /Users/simon/projects/elasticsearch/core/build/testrun/test/J0/temp/org.elasticsearch.common.rounding.TimeZoneRoundingTests_17AC5B1BB751F423-001\n 2> NOTE: test params are: codec=Asserting(Lucene62): {}, docValues:{}, maxPointsInLeafNode=386, maxMBSortInHeap=5.970189533938357, sim=ClassicSimilarity, locale=ko, timezone=Indian/Comoro\n 2> NOTE: Mac OS X 10.11.6 x86_64/Oracle Corporation 1.8.0_66 (64-bit)/cpus=4,threads=1,free=434107792,total=514850816\n 2> NOTE: All tests run in this JVM: [TimeZoneRoundingTests]\nCompleted [1/1] in 2.26s, 1 test, 1 failure <<< FAILURES!\n```\n", "comments": [ { "body": "@cbuescher can you look\n", "created_at": "2016-10-10T13:27:44Z" }, { "body": "I couldn't reproduce this on master with the given seed anymore, but going back to some earlier commit I was able get the random parameters that made the test trip in this case. Problem in that test occurs with:\n\n```\nDateTimeUnit.DAY_OF_MONTH\nDateTimeZone.forID(\"Atlantic/Azores\")\ndate = 1729989444634l;\n```\n\n Still need to do some digging what goes wrong here.\n", "created_at": "2016-11-14T11:50:05Z" }, { "body": "@cbuescher Just a side note but I updated JodaTime to v2.9.5 recently and they fixed some bugs like https://github.com/JodaOrg/joda-time/issues/373. Maybe related to this issue.\n", "created_at": "2016-11-14T11:57:02Z" }, { "body": "I found the root cause for this. The test error reveals an interesting edge case with DST overlaps (happening usually on DST end, turning back the clock one hour) in combination with rounding to day intervals.\n\nIn this particular case (tz: \"Atlantic/Azores\", dates around the DST transition on e.g. 
2000-10-29T01:00:00.000Z) we currently get the overlapping part during the DST transition as a separate rounding interval as illustrated in this table:\n\n| date | round(date) | nextRoundingValue(date) |\n| --- | --- | --- |\n| 2000-10-28T22:00:00.000Z |  2000-10-28T00:00:00.000Z | 2000-10-29T00:00:00.000Z |\n| 2000-10-28T23:00:00.000Z |  2000-10-28T00:00:00.000Z | 2000-10-29T00:00:00.000Z |\n| **2000-10-29T00:00:00.000Z** |  **2000-10-29T00:00:00.000Z** | 2000-10-30T00:00:00.000-01:00 |\n| **2000-10-29T00:00:00.000-01:00** |  **2000-10-29T00:00:00.000-01:00** | 2000-10-30T00:00:00.000-01:00 |\n| 2000-10-29T01:00:00.000-01:00 |  2000-10-29T00:00:00.000-01:00 | 2000-10-30T00:00:00.000-01:00 |\n| 2000-10-29T02:00:00.000-01:00 |  2000-10-29T00:00:00.000-01:00 | 2000-10-30T00:00:00.000-01:00 |\n\nAccording to https://www.timeanddate.com/time/change/portugal/horta?year=2000 the DST change happenes on Oct-29th at 1am local time, turning back the clock one hour (to offset -01:00). Currently the dates between \"2000-10-29T00:00:00.000Z\" and \"2000-10-29T01:00:00.000Z\" all round down to \"2000-10-29T00:00:00.000Z\" (before the transition) and the dates after but before next midnight round down to \"2000-10-29T00:00:00.000-01:00\". For a day rounding we would prefer a 25h hour bucket for \"2000-10-29\" to a one hour + another 24h bucket I guess. I think I have a fix that detects this kind of situation and merges the two intervals.\n", "created_at": "2016-11-14T17:05:15Z" } ], "number": 20833, "title": "TimeZoneRoundingTests#testRoundingRandom fails reproducibly" }
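The offset behaviour behind the table above can be checked directly with Joda-Time, which is what Elasticsearch used for date handling at the time. The following is a minimal, self-contained probe (the class name and printed format are only for illustration) showing the one-hour offset drop at the 2000-10-29 transition in Atlantic/Azores that produces the extra rounding bucket:

```java
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

// Minimal probe of the Atlantic/Azores DST overlap discussed above.
public class AzoresOverlapProbe {
    public static void main(String[] args) {
        DateTimeZone tz = DateTimeZone.forID("Atlantic/Azores");
        // 2000-10-29T00:30:00Z is half an hour before the backward shift at 01:00 UTC
        long beforeShift = new DateTime(2000, 10, 29, 0, 30, DateTimeZone.UTC).getMillis();
        long afterShift = beforeShift + 60 * 60 * 1000; // one hour later, inside the repeated hour

        System.out.println("offset before: " + tz.getOffset(beforeShift) / 3600000.0 + "h"); // 0.0h (DST)
        System.out.println("offset after:  " + tz.getOffset(afterShift) / 3600000.0 + "h");  // -1.0h (standard)

        // the transition instant itself can be located directly
        long transition = tz.nextTransition(beforeShift);
        DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ssZZ");
        System.out.println("transition at: " + fmt.withZone(tz).print(transition));
    }
}
```

The probe reports an offset of 0.0h just before the transition and -1.0h just after it, which is exactly the overlap that the day rounding has to absorb into a single 25-hour bucket.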
{ "body": "When using TimeUnitRounding with a DAY_OF_MONTH unit, failing tests in #20833 uncovered an issue when the DST shift happenes just one hour after midnight local time and sets back the clock to midnight, leading to an overlap. Previously this would lead to two different rounding values, depending on whether a date before or after the transition was rounded. This change detects this special case and correct for it by using the previous rounding date for both cases.\r\n\r\nIn this particular case (tz: \"Atlantic/Azores\", dates around the DST transition on e.g. 2000-10-29T01:00:00.000Z, unit: DAY_OF_MONTH) we currently get the overlapping part during the DST transition as a separate rounding interval as illustrated in this table:\r\n\r\n| date | round(date) | nextRoundingValue(date) |\r\n| --------------------------------|---------------------------------|---------------------------------|\r\n| 2000-10-28T22:00:00.000Z | 2000-10-28T00:00:00.000Z | 2000-10-29T00:00:00.000Z |\r\n| 2000-10-28T23:00:00.000Z | 2000-10-28T00:00:00.000Z | 2000-10-29T00:00:00.000Z |\r\n| 2000-10-29T00:00:00.000Z | **2000-10-29T00:00:00.000Z** | 2000-10-30T00:00:00.000-01:00 |\r\n| 2000-10-29T00:00:00.000-01:00 | **2000-10-29T00:00:00.000-01:00** | 2000-10-30T00:00:00.000-01:00 |\r\n| 2000-10-29T01:00:00.000-01:00 | 2000-10-29T00:00:00.000-01:00 | 2000-10-30T00:00:00.000-01:00 |\r\n| 2000-10-29T02:00:00.000-01:00 | 2000-10-29T00:00:00.000-01:00 | 2000-10-30T00:00:00.000-01:00 |\r\n\r\nAccording to https://www.timeanddate.com/time/change/portugal/horta?year=2000 the DST change happenes on Oct-29th at 1am local time, turning back the clock one hour (to offset -01:00). Currently the dates between \"2000-10-29T00:00:00.000Z\" and \"2000-10-29T01:00:00.000Z\" all round down to \"2000-10-29T00:00:00.000Z\" (before the transition) and the dates after but before next midnight round down to \"2000-10-29T00:00:00.000-01:00\". For a day rounding we would prefer a 25h hour bucket for \"2000-10-29\". \r\n\r\nWith this fix, the situation above changes to \r\n\r\n| date | round(date) | nextRoundingValue(date) |\r\n| --------------------------------|---------------------------------|---------------------------------|\r\n| 2000-10-28T22:00:00.000Z | 2000-10-28T00:00:00.000Z | 2000-10-29T00:00:00.000Z |\r\n| 2000-10-28T23:00:00.000Z | 2000-10-28T00:00:00.000Z | 2000-10-29T00:00:00.000Z |\r\n| 2000-10-29T00:00:00.000Z | 2000-10-29T00:00:00.000Z | 2000-10-30T00:00:00.000-01:00 |\r\n| 2000-10-29T00:00:00.000-01:00 | 2000-10-29T00:00:00.000Z | 2000-10-30T00:00:00.000-01:00 |\r\n| 2000-10-29T01:00:00.000-01:00 | 2000-10-29T00:00:00.000Z | 2000-10-30T00:00:00.000-01:00 |\r\n| 2000-10-29T02:00:00.000-01:00 | 2000-10-29T00:00:00.000Z | 2000-10-30T00:00:00.000-01:00 |\r\n\r\nSo now everything on \"2000-10-29\" gets rounded down to \"2000-10-29T00:00:00.000Z\".\r\n\r\nCloses #20833", "number": 21550, "review_comments": [ { "body": "couldn't this be just `return offset < offsetTicBefore;`?\n", "created_at": "2016-11-15T09:17:51Z" }, { "body": "Skipping intervals that are less that the field duration sounds good to me regardless of whether we are on a DST overlap, so I'm wondering whether we want to get rid of this `if` statement (assuming there are no performance implications)?\n", "created_at": "2016-11-15T09:19:49Z" }, { "body": "sure\n", "created_at": "2016-11-15T10:32:51Z" }, { "body": "I think generally \"skipping\" (or merging, like here) intervals that are less than the field duration is this is not always the right thing. 
Imagine - for example - a day that due to DST shift has only 23h (typically on DST start). We don't want to join that with the previous or the following day. I can look into this once again but its very likely some of our existing tests blow up when doing this.\n", "created_at": "2016-11-15T10:45:51Z" }, { "body": "I just tried removing this check and it fails various test cases, so I think this supports the abover reasoning\n", "created_at": "2016-11-15T10:47:09Z" } ], "title": "Fix time zone rounding edge case for DST overlaps" }
{ "commits": [ { "message": "Fix time zone rounding edge case for DST overlaps\n\nWhen using TimeUnitRounding with a DAY_OF_MONTH unit, failing tests in #20833\nuncovered an issue when the DST shift happenes just one hour after midnight\nlocal time and sets back the clock to midnight, leading to an overlap.\nPreviously this would lead to two different rounding values, depending on\nwhether a date before or after the transition was rounded. This change detects\nthis special case and correct for it by using the previous rounding date for\nboth cases.\n\nCloses #20833" } ], "files": [ { "diff": "@@ -128,15 +128,38 @@ public byte id() {\n @Override\n public long round(long utcMillis) {\n long rounded = field.roundFloor(utcMillis);\n- if (timeZone.isFixed() == false && timeZone.getOffset(utcMillis) != timeZone.getOffset(rounded)) {\n- // in this case, we crossed a time zone transition. In some edge\n- // cases this will\n- // result in a value that is not a rounded value itself. We need\n- // to round again\n- // to make sure. This will have no affect in cases where\n- // 'rounded' was already a proper\n- // rounded value\n- rounded = field.roundFloor(rounded);\n+ if (timeZone.isFixed() == false) {\n+ // special cases for non-fixed time zones with dst transitions\n+ if (timeZone.getOffset(utcMillis) != timeZone.getOffset(rounded)) {\n+ /*\n+ * the offset change indicates a dst transition. In some\n+ * edge cases this will result in a value that is not a\n+ * rounded value before the transition. We round again to\n+ * make sure we really return a rounded value. This will\n+ * have no effect in cases where we already had a valid\n+ * rounded value\n+ */\n+ rounded = field.roundFloor(rounded);\n+ } else {\n+ /*\n+ * check if the current time instant is at a start of a DST\n+ * overlap by comparing the offset of the instant and the\n+ * previous millisecond. We want to detect negative offset\n+ * changes that result in an overlap\n+ */\n+ if (timeZone.getOffset(rounded) < timeZone.getOffset(rounded - 1)) {\n+ /*\n+ * we are rounding a date just after a DST overlap. 
if\n+ * the overlap is smaller than the time unit we are\n+ * rounding to, we want to add the overlapping part to\n+ * the following rounding interval\n+ */\n+ long previousRounded = field.roundFloor(rounded - 1);\n+ if (rounded - previousRounded < field.getDurationField().getUnitMillis()) {\n+ rounded = previousRounded;\n+ }\n+ }\n+ }\n }\n assert rounded == field.roundFloor(rounded);\n return rounded;", "filename": "core/src/main/java/org/elasticsearch/common/rounding/Rounding.java", "status": "modified" }, { "diff": "@@ -514,6 +514,44 @@ public void testEdgeCasesTransition() {\n }\n }\n \n+ /**\n+ * tests for dst transition with overlaps and day roundings.\n+ */\n+ public void testDST_END_Edgecases() {\n+ // First case, dst happens at 1am local time, switching back one hour.\n+ // We want the overlapping hour to count for the next day, making it a 25h interval\n+\n+ DateTimeUnit timeUnit = DateTimeUnit.DAY_OF_MONTH;\n+ DateTimeZone tz = DateTimeZone.forID(\"Atlantic/Azores\");\n+ Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz);\n+\n+ // Sunday, 29 October 2000, 01:00:00 clocks were turned backward 1 hour\n+ // to Sunday, 29 October 2000, 00:00:00 local standard time instead\n+\n+ long midnightBeforeTransition = time(\"2000-10-29T00:00:00\", tz);\n+ long nextMidnight = time(\"2000-10-30T00:00:00\", tz);\n+\n+ assertInterval(midnightBeforeTransition, nextMidnight, rounding, 25 * 60, tz);\n+\n+ // Second case, dst happens at 0am local time, switching back one hour to 23pm local time.\n+ // We want the overlapping hour to count for the previous day here\n+\n+ tz = DateTimeZone.forID(\"America/Lima\");\n+ rounding = new Rounding.TimeUnitRounding(timeUnit, tz);\n+\n+ // Sunday, 1 April 1990, 00:00:00 clocks were turned backward 1 hour to\n+ // Saturday, 31 March 1990, 23:00:00 local standard time instead\n+\n+ midnightBeforeTransition = time(\"1990-03-31T00:00:00.000-04:00\");\n+ nextMidnight = time(\"1990-04-01T00:00:00.000-05:00\");\n+ assertInterval(midnightBeforeTransition, nextMidnight, rounding, 25 * 60, tz);\n+\n+ // make sure the next interval is 24h long again\n+ long midnightAfterTransition = time(\"1990-04-01T00:00:00.000-05:00\");\n+ nextMidnight = time(\"1990-04-02T00:00:00.000-05:00\");\n+ assertInterval(midnightAfterTransition, nextMidnight, rounding, 24 * 60, tz);\n+ }\n+\n /**\n * Test that time zones are correctly parsed. There is a bug with\n * Joda 2.9.4 (see https://github.com/JodaOrg/joda-time/issues/373)", "filename": "core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java", "status": "modified" } ] }
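The committed change is easier to read without the diff markers and the surrounding Elasticsearch plumbing. Below is a simplified, standalone restatement of the new rule using plain Joda-Time types (a sketch for illustration, not the production class): if the offset drops at the rounded instant, meaning we sit just after a backward DST shift, and the leftover piece is shorter than one whole unit, the value is merged into the previous bucket.

```java
import org.joda.time.DateTimeField;
import org.joda.time.DateTimeZone;

// Simplified restatement of the DST-overlap merge rule from the diff above.
// 'field' would be e.g. the day-of-month field of a chronology in the target time zone.
final class OverlapAwareRounding {

    static long roundFloor(DateTimeField field, DateTimeZone tz, long utcMillis) {
        long rounded = field.roundFloor(utcMillis);
        if (tz.isFixed() == false) {
            if (tz.getOffset(utcMillis) != tz.getOffset(rounded)) {
                // crossed a transition: round again so the result is itself a rounded value
                rounded = field.roundFloor(rounded);
            } else if (tz.getOffset(rounded) < tz.getOffset(rounded - 1)) {
                // 'rounded' sits right after a backward shift (an overlap); if the
                // leftover piece is shorter than one whole unit, merge it into the
                // previous bucket so the day keeps a single 25h interval
                long previousRounded = field.roundFloor(rounded - 1);
                if (rounded - previousRounded < field.getDurationField().getUnitMillis()) {
                    rounded = previousRounded;
                }
            }
        }
        return rounded;
    }
}
```

With a day-of-month field for Atlantic/Azores, every instant on 2000-10-29 then rounds to the same 25-hour bucket, matching the second table in the pull request description.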
{ "body": "I'm trying to run tribe node using elasticsearch 2.2.0. I have cluster of 2 machines with the following config:\n\n<pre>\nnetwork.host: 0.0.0.0\npath.data: /var/lib/elasticsearch/\npath.logs: /var/log/elasticsearch/\ncluster.name: logstash-data\ndiscovery.zen.ping.multicast.enabled: false\ndiscovery.zen.ping.unicast.hosts: [\"10.16.1.32\", \"10.16.1.75\"]\n</pre>\n\nOn another machine I run 2 elasticsearch instances with following configs:\n\n<pre>\ntransport.tcp.port: 9310\nhttp.port: 9210\nnetwork.host: 0.0.0.0\npath.data: /var/lib/elasticsearch/\npath.logs: /var/log/elasticsearch/\ncluster.name: logstash-kibana\n</pre>\n\nand\n\n<pre>\n#transport.tcp.port: 9301\n#http.port: 9201\nnetwork.host: 0.0.0.0\npath.data: /var/lib/elasticsearch/\npath.logs: /var/log/elasticsearch/\n\ntribe:\n data:\n cluster.name: logstash-data\n discovery.zen.ping.multicast.enabled: false\n discovery.zen.ping.unicast.hosts: [\"10.16.1.32\", \"10.16.1.75\"]\n network.host: 0.0.0.0\n kibana:\n cluster.name: logstash-kibana\n discovery.zen.ping.multicast.enabled: false\n discovery.zen.ping.unicast.hosts: [\"127.0.0.1:9310\"]\n network.host: 0.0.0.0\n</pre>\n\nThis config works ok. But if I'll uncomment custom port settings I'm getting following exception:\n\n<pre>\nlog4j:WARN No appenders could be found for logger (bootstrap).\nlog4j:WARN Please initialize the log4j system properly.\nlog4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.\nException in thread \"main\" BindTransportException[Failed to bind to [9300-9400]]; nested: ChannelException[Failed to bind to: /0.0.0.0:9400]; nested: AccessControlException[access denied (\"java.net.SocketPermission\" \"localhost:9400\" \"listen,resolve\")];\nLikely root cause: java.security.AccessControlException: access denied (\"java.net.SocketPermission\" \"localhost:9400\" \"listen,resolve\")\n at java.security.AccessControlContext.checkPermission(AccessControlContext.java:457)\n at java.security.AccessController.checkPermission(AccessController.java:884)\n at java.lang.SecurityManager.checkPermission(SecurityManager.java:549)\n at java.lang.SecurityManager.checkListen(SecurityManager.java:1131)\n at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:221)\n at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)\n at org.jboss.netty.channel.socket.nio.NioServerBoss$RegisterTask.run(NioServerBoss.java:193)\n at org.jboss.netty.channel.socket.nio.AbstractNioSelector.processTaskQueue(AbstractNioSelector.java:391)\n at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:315)\n at org.jboss.netty.channel.socket.nio.NioServerBoss.run(NioServerBoss.java:42)\n at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108)\n at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\nRefer to the log for complete error details.\n</pre>\n", "comments": [ { "body": "The problem here is the tribe nodes create an internal node for each cluster, and bind to the configured port for that client node. When you specify a custom ports for http and transport, security manager is configured with those ports, instead of the default ranges, which for transport are 9300-9400. 
But then the tribe client nodes continue to use the default ports, and fail when attempting to bind.\n\nThe only way I can see to fix this is to add knowledge about tribe settings to `Security.addBindPermissions`. /cc @rmuir \n", "created_at": "2016-02-11T07:19:53Z" }, { "body": "So the problem isn't that the values differ from the standard, it's that the values are pinned at all correct? To mitigate this I attempted to pin back to the default ports of 9200 for http and 9300 for transport but still received the exception. If the value is pinned the security manager and clients all try and use the pinned port, which fail, whereas if the value is unset the security manager uses the lowest free port, which is the default, and the clients use the subsequent ports in the ranges. Is my understanding correct?\n\nEDIT: No. The port value must be a range to accommodate the clients, and the tribe node will start with a range that starts with the default, but will fail to start if the range does not start from the default.\n\n``` yaml\nhttp.port: 9500-9502\ntransport.tcp.port: 9600-9602\n```\n\n```\nException in thread \"main\" BindTransportException[Failed to bind to [9300-9400]]; nested: ChannelException[Failed to bind to: /172.17.0.17:9400]; nested: AccessControlException[access denied (\"java.net.SocketPermission\" \"localhost:9400\" \"listen,resolve\")];\nLikely root cause: java.security.AccessControlException: access denied (\"java.net.SocketPermission\" \"localhost:9400\" \"listen,resolve\")\n at java.security.AccessControlContext.checkPermission(AccessControlContext.java:472)\n at java.security.AccessController.checkPermission(AccessController.java:884)\n at java.lang.SecurityManager.checkPermission(SecurityManager.java:549)\n at java.lang.SecurityManager.checkListen(SecurityManager.java:1131)\n at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:221)\n at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)\n at org.jboss.netty.channel.socket.nio.NioServerBoss$RegisterTask.run(NioServerBoss.java:193)\n at org.jboss.netty.channel.socket.nio.AbstractNioSelector.processTaskQueue(AbstractNioSelector.java:391)\n at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:315)\n at org.jboss.netty.channel.socket.nio.NioServerBoss.run(NioServerBoss.java:42)\n at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108)\n at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\nRefer to the log for complete error details.\n```\n", "created_at": "2016-03-28T23:29:46Z" }, { "body": "Was there any solution to this issue? I am facing the same\n", "created_at": "2016-05-12T09:22:36Z" }, { "body": "@chinmoydas1 The only workaround at the moment is to use the default ports. 
I've marked this as adoptme.\n", "created_at": "2016-05-15T03:05:16Z" }, { "body": "+1\n", "created_at": "2016-08-10T09:25:20Z" }, { "body": "I am facing the same problem\n", "created_at": "2016-09-14T06:55:04Z" }, { "body": "my tribe config\n\n```\nabonuccelli@w530 /opt/elk/TEST/tribe_test/tribeNode $ egrep '^[^#]+' elasticsearch-2.3.5/config/elasticsearch.yml \ncluster.name: tribe\nnetwork.host: 192.168.1.105\nhttp.port: 9220\ntransport.tcp.port: 9320\ntribe:\n A: \n cluster.name: clusterA\n discovery.zen.ping.unicast.hosts: [\"192.168.1.105:9300\"]\n network.publish_host: 192.168.1.105\n transport.tcp.port: 9340\n B: \n cluster.name: clusterB\n discovery.zen.ping.unicast.hosts: [\"192.168.1.105:9301\"]\n network.publish_host: 192.168.1.105\n transport.tcp.port: 9341\n M:\n cluster.name: monitoring\n discovery.zen.ping.unicast.hosts: [\"192.168.1.105:9310\"]\n network.publish_host: 192.168.1.105\n transport.tcp.port: 9342\nmarvel.enabled: false\n\n```\n\nwas getting\n\n```\n./elasticsearch-2.3.5/bin/elasticsearch\n[2016-09-14 09:51:06,229][INFO ][node ] [Venom] version[2.3.5], pid[5567], build[90f439f/2016-07-27T10:36:52Z]\n[2016-09-14 09:51:06,229][INFO ][node ] [Venom] initializing ...\n[2016-09-14 09:51:06,740][INFO ][plugins ] [Venom] modules [reindex, lang-expression, lang-groovy], plugins [license, marvel-agent], sites []\n[2016-09-14 09:51:08,052][INFO ][node ] [Venom/A] version[2.3.5], pid[5567], build[90f439f/2016-07-27T10:36:52Z]\n[2016-09-14 09:51:08,052][INFO ][node ] [Venom/A] initializing ...\n[2016-09-14 09:51:08,400][INFO ][plugins ] [Venom/A] modules [reindex, lang-expression, lang-groovy], plugins [license, marvel-agent], sites []\n[2016-09-14 09:51:08,679][INFO ][node ] [Venom/A] initialized\n[2016-09-14 09:51:08,680][INFO ][node ] [Venom/B] version[2.3.5], pid[5567], build[90f439f/2016-07-27T10:36:52Z]\n[2016-09-14 09:51:08,680][INFO ][node ] [Venom/B] initializing ...\n[2016-09-14 09:51:08,986][INFO ][plugins ] [Venom/B] modules [reindex, lang-expression, lang-groovy], plugins [license, marvel-agent], sites []\n[2016-09-14 09:51:09,189][INFO ][node ] [Venom/B] initialized\n[2016-09-14 09:51:09,190][INFO ][node ] [Venom/M] version[2.3.5], pid[5567], build[90f439f/2016-07-27T10:36:52Z]\n[2016-09-14 09:51:09,190][INFO ][node ] [Venom/M] initializing ...\n[2016-09-14 09:51:09,447][INFO ][plugins ] [Venom/M] modules [reindex, lang-expression, lang-groovy], plugins [license, marvel-agent], sites []\n[2016-09-14 09:51:09,593][INFO ][node ] [Venom/M] initialized\n[2016-09-14 09:51:09,598][INFO ][node ] [Venom] initialized\n[2016-09-14 09:51:09,599][INFO ][node ] [Venom] starting ...\n[2016-09-14 09:51:09,670][INFO ][transport ] [Venom] publish_address {192.168.1.105:9320}, bound_addresses {192.168.1.105:9320}\n[2016-09-14 09:51:09,673][INFO ][discovery ] [Venom] tribe/lINF7OL1S0yiuYt5nV154w\n[2016-09-14 09:51:09,674][WARN ][discovery ] [Venom] waited for 0s and no initial state was set by the discovery\n[2016-09-14 09:51:09,691][INFO ][http ] [Venom] publish_address {192.168.1.105:9220}, bound_addresses {192.168.1.105:9220}\n[2016-09-14 09:51:09,691][INFO ][node ] [Venom/A] starting ...\n[2016-09-14 09:51:09,743][INFO ][node ] [Venom/A] stopping ...\n[2016-09-14 09:51:09,745][INFO ][node ] [Venom/A] stopped\n[2016-09-14 09:51:09,745][INFO ][node ] [Venom/A] closing ...\n[2016-09-14 09:51:09,749][INFO ][node ] [Venom/A] closed\n[2016-09-14 09:51:09,750][INFO ][node ] [Venom/B] closing ...\n[2016-09-14 09:51:09,752][INFO ][node ] [Venom/B] closed\n[2016-09-14 
09:51:09,752][INFO ][node ] [Venom/M] closing ...\n[2016-09-14 09:51:09,754][INFO ][node ] [Venom/M] closed\nException in thread \"main\" BindTransportException[Failed to bind to [9340]]; nested: ChannelException[Failed to bind to: /192.168.1.105:9340]; nested: AccessControlException[access denied (\"java.net.SocketPermission\" \"localhost:9340\" \"listen,resolve\")];\nLikely root cause: java.security.AccessControlException: access denied (\"java.net.SocketPermission\" \"localhost:9340\" \"listen,resolve\")\n at java.security.AccessControlContext.checkPermission(AccessControlContext.java:457)\n at java.security.AccessController.checkPermission(AccessController.java:884)\n at java.lang.SecurityManager.checkPermission(SecurityManager.java:549)\n at java.lang.SecurityManager.checkListen(SecurityManager.java:1131)\n at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:221)\n at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)\n at org.jboss.netty.channel.socket.nio.NioServerBoss$RegisterTask.run(NioServerBoss.java:193)\n at org.jboss.netty.channel.socket.nio.AbstractNioSelector.processTaskQueue(AbstractNioSelector.java:391)\n at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:315)\n at org.jboss.netty.channel.socket.nio.NioServerBoss.run(NioServerBoss.java:42)\n at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108)\n at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\nRefer to the log for complete error details.\n[2016-09-14 09:51:09,760][INFO ][node ] [Venom] stopping ...\n[2016-09-14 09:51:09,772][INFO ][node ] [Venom] stopped\n[2016-09-14 09:51:09,772][INFO ][node ] [Venom] closing ...\n[2016-09-14 09:51:09,774][INFO ][node ] [Venom] closed\n```\n\nI was able to workaround adding a security exception (use this at your own risk)\n\n```\nabonuccelli@w530 /opt/elk/TEST/tribe_test $ cat tribeNode/elasticsearch-2.3.5/java.policy \ngrant {\n permission java.net.SocketPermission \"localhost:9340\", \"listen,resolve\";\n};\n```\n\nthen\n\n```\nabonuccelli@w530 /opt/elk/TEST/tribe_test/tribeNode $ export ES_JAVA_OPTS=-Djava.security.policy=file:///opt/elk/TEST/tribe_test/tribeNode/elasticsearch-2.3.5/java.policy; ./elasticsearch-2.3.5/bin/elasticsearch\n[2016-09-14 09:51:37,629][INFO ][node ] [Satannish] version[2.3.5], pid[5749], build[90f439f/2016-07-27T10:36:52Z]\n[2016-09-14 09:51:37,630][INFO ][node ] [Satannish] initializing ...\n[2016-09-14 09:51:38,162][INFO ][plugins ] [Satannish] modules [reindex, lang-expression, lang-groovy], plugins [license, marvel-agent], sites []\n[2016-09-14 09:51:39,397][INFO ][node ] [Satannish/A] version[2.3.5], pid[5749], build[90f439f/2016-07-27T10:36:52Z]\n[2016-09-14 09:51:39,398][INFO ][node ] [Satannish/A] initializing ...\n[2016-09-14 09:51:39,656][INFO ][plugins ] [Satannish/A] modules [reindex, lang-expression, lang-groovy], plugins [license, marvel-agent], sites []\n[2016-09-14 09:51:39,892][INFO ][node ] [Satannish/A] initialized\n[2016-09-14 09:51:39,893][INFO ][node ] [Satannish/B] version[2.3.5], pid[5749], build[90f439f/2016-07-27T10:36:52Z]\n[2016-09-14 09:51:39,893][INFO ][node ] [Satannish/B] initializing ...\n[2016-09-14 09:51:40,178][INFO ][plugins ] [Satannish/B] modules [reindex, lang-expression, 
lang-groovy], plugins [license, marvel-agent], sites []\n[2016-09-14 09:51:40,337][INFO ][node ] [Satannish/B] initialized\n[2016-09-14 09:51:40,337][INFO ][node ] [Satannish/M] version[2.3.5], pid[5749], build[90f439f/2016-07-27T10:36:52Z]\n[2016-09-14 09:51:40,337][INFO ][node ] [Satannish/M] initializing ...\n[2016-09-14 09:51:40,599][INFO ][plugins ] [Satannish/M] modules [reindex, lang-expression, lang-groovy], plugins [license, marvel-agent], sites []\n[2016-09-14 09:51:40,710][INFO ][node ] [Satannish/M] initialized\n[2016-09-14 09:51:40,714][INFO ][node ] [Satannish] initialized\n[2016-09-14 09:51:40,714][INFO ][node ] [Satannish] starting ...\n[2016-09-14 09:51:40,778][INFO ][transport ] [Satannish] publish_address {192.168.1.105:9320}, bound_addresses {192.168.1.105:9320}\n[2016-09-14 09:51:40,780][INFO ][discovery ] [Satannish] tribe/R-S-WQtmSvOLUInO-Cke7w\n[2016-09-14 09:51:40,781][WARN ][discovery ] [Satannish] waited for 0s and no initial state was set by the discovery\n[2016-09-14 09:51:40,786][INFO ][http ] [Satannish] publish_address {192.168.1.105:9220}, bound_addresses {192.168.1.105:9220}\n[2016-09-14 09:51:40,786][INFO ][node ] [Satannish/A] starting ...\n[2016-09-14 09:51:40,794][INFO ][transport ] [Satannish/A] publish_address {192.168.1.105:9340}, bound_addresses {192.168.1.105:9340}\n[2016-09-14 09:51:40,797][INFO ][discovery ] [Satannish/A] clusterA/RcPrSmbnRlmSh1jPShYQjA\n^C[2016-09-14 09:51:45,560][INFO ][node ] [Satannish] stopping ...\n[2016-09-14 09:51:45,584][INFO ][node ] [Satannish] stopped\n[2016-09-14 09:51:45,584][INFO ][node ] [Satannish] closing ...\n[2016-09-14 09:51:45,584][INFO ][node ] [Satannish/A] stopping ...\n[2016-09-14 09:51:45,590][INFO ][node ] [Satannish/A] stopped\n[2016-09-14 09:51:45,590][INFO ][node ] [Satannish/A] closing ...\n[2016-09-14 09:51:45,595][INFO ][node ] [Satannish/A] closed\n[2016-09-14 09:51:45,595][INFO ][node ] [Satannish/B] closing ...\n[2016-09-14 09:51:45,598][INFO ][node ] [Satannish/B] closed\n[2016-09-14 09:51:45,598][INFO ][node ] [Satannish/M] closing ...\n[2016-09-14 09:51:45,601][INFO ][node ] [Satannish/M] closed\n[2016-09-14 09:51:45,603][INFO ][node ] [Satannish] closed\n\n```\n\ncc @rjernst \n", "created_at": "2016-09-14T07:57:45Z" }, { "body": "I'm removing the adoptme label as I have a fix for this and will open a PR soon. \n", "created_at": "2016-11-13T20:13:51Z" }, { "body": "I opened #21546.\n", "created_at": "2016-11-14T17:07:22Z" } ], "number": 16392, "title": "tribe node fails to start with non default port settings" }
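The workaround above grants the missing `java.net.SocketPermission` by hand through a custom policy file. What the bootstrap ultimately needs is one listen/resolve grant per port range that any embedded node, including each tribe client, may bind. A minimal sketch of building such grants programmatically (class and method names here are illustrative, not the Elasticsearch bootstrap code):

```java
import java.net.SocketPermission;
import java.security.Permissions;

// Minimal illustration of the kind of grant the bootstrap has to make for
// every transport/HTTP port range an embedded tribe client node may bind.
final class BindPermissions {

    static Permissions forPortRanges(String... portRanges) {
        Permissions policy = new Permissions();
        for (String range : portRanges) {
            // listen is always checked against the local host; the wildcard host
            // avoids any name-service lookup (see SocketPermission.implies)
            policy.add(new SocketPermission("*:" + range, "listen,resolve"));
        }
        return policy;
    }

    public static void main(String[] args) {
        // the outer node plus one port per tribe client, mirroring the config above
        Permissions policy = forPortRanges("9320", "9340", "9341", "9342");
        System.out.println(policy.implies(new SocketPermission("localhost:9340", "listen"))); // true
        System.out.println(policy.implies(new SocketPermission("localhost:9400", "listen"))); // false
    }
}
```

With only the outer node's ports granted, the check for a tribe client's port fails with exactly the AccessControlException shown in the logs above.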
{ "body": "Today when a node starts, we create dynamic socket permissions based on\r\nthe configured HTTP ports and transport ports. If no ports are\r\nconfigured, we use the default port ranges. When a tribe node starts, a\r\ntribe node creates an internal node client for connecting to each remote\r\ncluster. If neither an explicit HTTP port nor transport ports were\r\nspecified, the default port ranges are large enough for the tribe node\r\nand its internal node clients. If an explicit HTTP port or transport\r\nport was specified for the tribe node, then socket permissions for those\r\nports will be created, but not for the internal node clients. Whether\r\nthe internal node clients have explicit ports specified, or attempt to\r\nbind within the default range, socket permissions for these will not\r\nhave been created and the internal node clients will hit a permissions\r\nissue when attempting to bind. This commit addresses this issue by also\r\naccounting for tribe nodes when creating the dynamic socket\r\npermissions. Additionally, we add our first real integration test for\r\ntribe nodes.\r\n\r\nCloses #16392 \r\nCloses #21122", "number": 21546, "review_comments": [ { "body": "Why the copy of the map?\n", "created_at": "2016-11-14T18:19:20Z" }, { "body": "Unrelated to this PR: why do we even allow http to be enabled on internal client nodes? They should only ever be communicated with over transport (internally from the real tribe node, and from other nodes), right?\n", "created_at": "2016-11-14T18:21:51Z" }, { "body": "No need for the explicit task path (this will resolve to local tasks within the project). I try to avoid explicit paths as they make reorganizing a pain.\n", "created_at": "2016-11-14T18:27:05Z" }, { "body": "I pushed 795c1218523a85333838b02e757aa1ad07f17613.\n", "created_at": "2016-11-14T18:57:14Z" }, { "body": "Yeah, that was silly. I pushed 9fb54f4ef8dce57fdf76aa80c075bb5fc29321d3.\n", "created_at": "2016-11-14T18:57:24Z" }, { "body": "Yeah, I'm not sure why we allow HTTP to be enabled on the internal node clients; at least it's disabled by default. :smile:\n", "created_at": "2016-11-14T19:00:42Z" } ], "title": "Add socket permissions for tribe nodes" }
{ "commits": [ { "message": "Add socket permissions for tribe nodes\n\nToday when a node starts, we create dynamic socket permissions based on\nthe configured HTTP ports and transport ports. If no ports are\nconfigured, we use the default port ranges. When a tribe node starts, a\ntribe node creates an internal node client for connecting to each remote\ncluster. If neither an explicit HTTP port nor transport ports were\nspecified, the default port ranges are large enough for the tribe node\nand its internal node clients. If an explicit HTTP port or transport\nport was specified for the tribe node, then socket permissions for those\nports will be created, but not for the internal node clients. Whether\nthe internal node clients have explicit ports specified, or attempt to\nbind within the default range, socket permissions for these will not\nhave been created and the internal node clients will hit a permissions\nissue when attempting to bind. This commit addresses this issue by also\naccounting for tribe nodes when creating the dynamic socket\npermissions. Additionally, we add our first real integration test for\ntribe nodes." }, { "message": "Fallback to settings if transport profile is empty\n\nIf the transport profile does not contain a TCP port range, we fallback\nto the top-level settings." }, { "message": "Remove unnecessary hash map copy in o.e.b.Security\n\nThis commit removes an unnecessary copying of the tribe node group\nsettings in o.e.b.Security." }, { "message": "Remove explicit task paths in tribe node test\n\nThis commit removes some explicit paths in the tribe node smoke test so\nthat these are easier to maintain (in the case of reorganization)." } ], "files": [ { "diff": "@@ -123,7 +123,7 @@ class ClusterConfiguration {\n \n Map<String, String> systemProperties = new HashMap<>()\n \n- Map<String, String> settings = new HashMap<>()\n+ Map<String, Object> settings = new HashMap<>()\n \n // map from destination path, to source file\n Map<String, Object> extraConfigFiles = new HashMap<>()\n@@ -140,7 +140,7 @@ class ClusterConfiguration {\n }\n \n @Input\n- void setting(String name, String value) {\n+ void setting(String name, Object value) {\n settings.put(name, value)\n }\n ", "filename": "buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy", "status": "modified" }, { "diff": "@@ -20,10 +20,10 @@\n package org.elasticsearch.bootstrap;\n \n import org.elasticsearch.SecureSM;\n-import org.elasticsearch.Version;\n import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.SuppressForbidden;\n import org.elasticsearch.common.io.PathUtils;\n+import org.elasticsearch.common.network.NetworkModule;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.env.Environment;\n import org.elasticsearch.http.HttpTransportSettings;\n@@ -266,12 +266,14 @@ static void addFilePermissions(Permissions policy, Environment environment) {\n }\n }\n \n- static void addBindPermissions(Permissions policy, Settings settings) throws IOException {\n- // http is simple\n- String httpRange = HttpTransportSettings.SETTING_HTTP_PORT.get(settings).getPortRangeString();\n- // listen is always called with 'localhost' but use wildcard to be sure, no name service is consulted.\n- // see SocketPermission implies() code\n- policy.add(new SocketPermission(\"*:\" + httpRange, \"listen,resolve\"));\n+ /**\n+ * Add dynamic {@link SocketPermission}s based on HTTP and transport settings.\n+ *\n+ * @param policy the {@link Permissions} instance to apply 
the dynamic {@link SocketPermission}s to.\n+ * @param settings the {@link Settings} instance to read the HTTP and transport settings from\n+ */\n+ static void addBindPermissions(Permissions policy, Settings settings) {\n+ addSocketPermissionForHttp(policy, settings);\n // transport is waaaay overengineered\n Map<String, Settings> profiles = TransportSettings.TRANSPORT_PROFILES_SETTING.get(settings).getAsGroups();\n if (!profiles.containsKey(TransportSettings.DEFAULT_PROFILE)) {\n@@ -284,18 +286,78 @@ static void addBindPermissions(Permissions policy, Settings settings) throws IOE\n for (Map.Entry<String, Settings> entry : profiles.entrySet()) {\n Settings profileSettings = entry.getValue();\n String name = entry.getKey();\n- String transportRange = profileSettings.get(\"port\", TransportSettings.PORT.get(settings));\n \n // a profile is only valid if its the default profile, or if it has an actual name and specifies a port\n boolean valid = TransportSettings.DEFAULT_PROFILE.equals(name) || (Strings.hasLength(name) && profileSettings.get(\"port\") != null);\n if (valid) {\n- // listen is always called with 'localhost' but use wildcard to be sure, no name service is consulted.\n- // see SocketPermission implies() code\n- policy.add(new SocketPermission(\"*:\" + transportRange, \"listen,resolve\"));\n+ addSocketPermissionForTransportProfile(policy, profileSettings, settings);\n+ }\n+ }\n+\n+ for (final Settings tribeNodeSettings : settings.getGroups(\"tribe\", true).values()) {\n+ // tribe nodes have HTTP disabled by default, so we check if HTTP is enabled before granting\n+ if (NetworkModule.HTTP_ENABLED.exists(tribeNodeSettings) && NetworkModule.HTTP_ENABLED.get(tribeNodeSettings)) {\n+ addSocketPermissionForHttp(policy, tribeNodeSettings);\n }\n+ addSocketPermissionForTransport(policy, tribeNodeSettings);\n }\n }\n \n+ /**\n+ * Add dynamic {@link SocketPermission} based on HTTP settings.\n+ *\n+ * @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to.\n+ * @param settings the {@link Settings} instance to read the HTTP settingsfrom\n+ */\n+ private static void addSocketPermissionForHttp(final Permissions policy, final Settings settings) {\n+ // http is simple\n+ final String httpRange = HttpTransportSettings.SETTING_HTTP_PORT.get(settings).getPortRangeString();\n+ addSocketPermissionForPortRange(policy, httpRange);\n+ }\n+\n+ /**\n+ * Add dynamic {@link SocketPermission} based on transport settings. 
This method will first check if there is a port range specified in\n+ * the transport profile specified by {@code profileSettings} and will fall back to {@code settings}.\n+ *\n+ * @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to\n+ * @param profileSettings the {@link Settings} to read the transport profile from\n+ * @param settings the {@link Settings} instance to read the transport settings from\n+ */\n+ private static void addSocketPermissionForTransportProfile(\n+ final Permissions policy,\n+ final Settings profileSettings,\n+ final Settings settings) {\n+ final String transportRange = profileSettings.get(\"port\");\n+ if (transportRange != null) {\n+ addSocketPermissionForPortRange(policy, transportRange);\n+ } else {\n+ addSocketPermissionForTransport(policy, settings);\n+ }\n+ }\n+\n+ /**\n+ * Add dynamic {@link SocketPermission} based on transport settings.\n+ *\n+ * @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to\n+ * @param settings the {@link Settings} instance to read the transport settings from\n+ */\n+ private static void addSocketPermissionForTransport(final Permissions policy, final Settings settings) {\n+ final String transportRange = TransportSettings.PORT.get(settings);\n+ addSocketPermissionForPortRange(policy, transportRange);\n+ }\n+\n+ /**\n+ * Add dynamic {@link SocketPermission} for the specified port range.\n+ *\n+ * @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission} to.\n+ * @param portRange the port range\n+ */\n+ private static void addSocketPermissionForPortRange(final Permissions policy, final String portRange) {\n+ // listen is always called with 'localhost' but use wildcard to be sure, no name service is consulted.\n+ // see SocketPermission implies() code\n+ policy.add(new SocketPermission(\"*:\" + portRange, \"listen,resolve\"));\n+ }\n+\n /**\n * Add access to path (and all files underneath it)\n * @param policy current policy to add permissions to", "filename": "core/src/main/java/org/elasticsearch/bootstrap/Security.java", "status": "modified" }, { "diff": "@@ -0,0 +1,69 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+import org.elasticsearch.gradle.test.ClusterConfiguration\n+import org.elasticsearch.gradle.test.ClusterFormationTasks\n+import org.elasticsearch.gradle.test.NodeInfo\n+\n+apply plugin: 'elasticsearch.rest-test'\n+\n+List<NodeInfo> oneNodes\n+\n+task setupClusterOne(type: DefaultTask) {\n+ mustRunAfter(precommit)\n+ ClusterConfiguration configOne = new ClusterConfiguration(project)\n+ configOne.clusterName = 'one'\n+ configOne.setting('node.name', 'one')\n+ oneNodes = ClusterFormationTasks.setup(project, setupClusterOne, configOne)\n+}\n+\n+List<NodeInfo> twoNodes\n+\n+task setupClusterTwo(type: DefaultTask) {\n+ mustRunAfter(precommit)\n+ ClusterConfiguration configTwo = new ClusterConfiguration(project)\n+ configTwo.clusterName = 'two'\n+ configTwo.setting('node.name', 'two')\n+ twoNodes = ClusterFormationTasks.setup(project, setupClusterTwo, configTwo)\n+}\n+\n+integTest {\n+ dependsOn(setupClusterOne, setupClusterTwo)\n+ cluster {\n+ // tribe nodes had a bug where if explicit ports was specified for the tribe node, the dynamic socket permissions that were applied\n+ // would not account for the fact that the internal node client needed to bind to sockets too; thus, we use explicit port ranges to\n+ // ensure that the code that fixes this bug is exercised\n+ setting 'http.port', '40200-40249'\n+ setting 'transport.tcp.port', '40300-40349'\n+ setting 'node.name', 'quest'\n+ setting 'tribe.one.cluster.name', 'one'\n+ setting 'tribe.one.discovery.zen.ping.unicast.hosts', \"'${-> oneNodes.get(0).transportUri()}'\"\n+ setting 'tribe.one.http.enabled', 'true'\n+ setting 'tribe.one.http.port', '40250-40299'\n+ setting 'tribe.one.transport.tcp.port', '40350-40399'\n+ setting 'tribe.two.cluster.name', 'two'\n+ setting 'tribe.two.discovery.zen.ping.unicast.hosts', \"'${-> twoNodes.get(0).transportUri()}'\"\n+ setting 'tribe.two.http.enabled', 'true'\n+ setting 'tribe.two.http.port', '40250-40299'\n+ setting 'tribe.two.transport.tcp.port', '40250-40399'\n+ }\n+ // need to kill the standalone nodes here\n+ finalizedBy 'setupClusterOne#stop'\n+ finalizedBy 'setupClusterTwo#stop'\n+}", "filename": "qa/smoke-test-tribe-node/build.gradle", "status": "added" }, { "diff": "@@ -0,0 +1,53 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.tribe;\n+\n+import com.carrotsearch.randomizedtesting.annotations.Name;\n+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;\n+import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;\n+import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;\n+import org.elasticsearch.test.rest.yaml.parser.ClientYamlTestParseException;\n+\n+import java.io.IOException;\n+\n+public class TribeClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {\n+\n+ // tribe nodes can not handle delete indices requests\n+ @Override\n+ protected boolean preserveIndicesUponCompletion() {\n+ return true;\n+ }\n+\n+ // tribe nodes can not handle delete template requests\n+ @Override\n+ protected boolean preserveTemplatesUponCompletion() {\n+ return true;\n+ }\n+\n+ public TribeClientYamlTestSuiteIT(@Name(\"yaml\") final ClientYamlTestCandidate testCandidate) {\n+ super(testCandidate);\n+ }\n+\n+ @ParametersFactory\n+ public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {\n+ return createParameters();\n+ }\n+\n+}", "filename": "qa/smoke-test-tribe-node/src/test/java/org/elasticsearch/tribe/TribeClientYamlTestSuiteIT.java", "status": "added" }, { "diff": "@@ -0,0 +1,16 @@\n+---\n+\"Tribe node test\":\n+ - do:\n+ cat.nodes:\n+ h: name\n+ s: name\n+ v: true\n+\n+ - match:\n+ $body: |\n+ /^ name\\n\n+ one\\n\n+ quest\\n\n+ quest/one\\n\n+ quest/two\\n\n+ two\\n $/", "filename": "qa/smoke-test-tribe-node/src/test/resources/rest-api-spec/test/tribe/10_basic.yaml", "status": "added" }, { "diff": "@@ -58,12 +58,13 @@ List projects = [\n 'qa:evil-tests',\n 'qa:rolling-upgrade',\n 'qa:smoke-test-client',\n+ 'qa:smoke-test-http',\n 'qa:smoke-test-ingest-with-all-dependencies',\n 'qa:smoke-test-ingest-disabled',\n 'qa:smoke-test-multinode',\n 'qa:smoke-test-plugins',\n 'qa:smoke-test-reindex-with-painless',\n- 'qa:smoke-test-http',\n+ 'qa:smoke-test-tribe-node',\n 'qa:vagrant',\n ]\n ", "filename": "settings.gradle", "status": "modified" }, { "diff": "@@ -150,6 +150,16 @@ protected boolean preserveIndicesUponCompletion() {\n return false;\n }\n \n+ /**\n+ * Controls whether or not to preserve templates upon completion of this test. The default implementation is to delete not preserve\n+ * templates.\n+ *\n+ * @return whether or not to preserve templates\n+ */\n+ protected boolean preserveTemplatesUponCompletion() {\n+ return false;\n+ }\n+\n private void wipeCluster() throws IOException {\n if (preserveIndicesUponCompletion() == false) {\n // wipe indices\n@@ -164,7 +174,9 @@ private void wipeCluster() throws IOException {\n }\n \n // wipe index templates\n- adminClient().performRequest(\"DELETE\", \"_template/*\");\n+ if (preserveTemplatesUponCompletion() == false) {\n+ adminClient().performRequest(\"DELETE\", \"_template/*\");\n+ }\n \n wipeSnapshots();\n }", "filename": "test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java", "status": "modified" } ] }
{ "body": "<!--\r\nGitHub is reserved for bug reports and feature requests. The best place\r\nto ask a general question is at the Elastic Discourse forums at\r\nhttps://discuss.elastic.co. If you are in fact posting a bug report or\r\na feature request, please include one and only one of the below blocks\r\nin your new issue. Note that whether you're filing a bug report or a\r\nfeature request, ensure that your submission is for an\r\n[OS that we support](https://www.elastic.co/support/matrix#show_os).\r\nBug reports on an OS that we do not support or feature requests\r\nspecific to an OS that we do not support will be closed.\r\n-->\r\n\r\n<!--\r\nIf you are filing a bug report, please remove the below feature\r\nrequest block and provide responses for all of the below items.\r\n-->\r\n\r\n**Elasticsearch version**: 5.0.0\r\n\r\n**Plugins installed**: []\r\n\r\n**JVM version**:Oracle Corporation/Java HotSpot(TM) 64-Bit Server VM/1.8.0_65/25.65-b01\r\n\r\n**OS version**:Linux/4.2.6-201.fc22.x86_64/amd64\r\n\r\n**Description of the problem including expected versus actual behavior**: When running elasticsearch with assertions enabled, fetching inner_hits for a nested document inside a child document causes an assertion to fail.\r\n\r\n**Steps to reproduce**:\r\n1. Start Elasticsearch 5.0 with assertions enabled\r\n2. Run [these sense commands](https://github.com/elastic/elasticsearch/files/586347/crash-repro-steps.txt)\r\n3. The last one will fail\r\n\r\n**Provide logs (if relevant)**:\r\n", "comments": [ { "body": "Thanks @kbarrett! I've updated a pr with a fix #21522.\n", "created_at": "2016-11-14T06:28:58Z" } ], "number": 21503, "title": "Failing assertion in ParentFieldSubFetchPhase" }
{ "body": "Otherwise an empty string get added as _parent field.\r\n\r\nPR for #21503", "number": 21522, "review_comments": [ { "body": "I don't understand this comment?\n", "created_at": "2016-11-14T08:31:42Z" }, { "body": "If I'm not mistaken, this would not work if we were able to put a parent/child inner hit under a nested inner hit. Maybe it does not matter since it is not possible today, but I don't like that this fix does not generalize well. Maybe we should instead fix `getParentId` to return `null` when a document does not have a `_parent` field using `LeafReader.getDocsWithField`?\n", "created_at": "2016-11-14T08:35:47Z" }, { "body": "> put a parent/child inner hit under a nested inner hit\n\nRight, that doesn't work. One can only nest a nested inner hit under a p/c inner hit, which is what this check is checking for.\n\n> Maybe we should instead fix getParentId to return null when a document does not have a _parent field using LeafReader.getDocsWithField?\n\n+1 That is a cleaner solution.\n", "created_at": "2016-11-14T10:44:23Z" }, { "body": "Done\n", "created_at": "2016-11-14T10:49:33Z" } ], "title": "Skip adding a parent field to nested documents." }
{ "commits": [ { "message": "inner_hits: Skip adding a parent field to nested documents.\n\nOtherwise an empty string get added as _parent field.\n\nCloses #21503" } ], "files": [ { "diff": "@@ -47,6 +47,11 @@ public void hitExecute(SearchContext context, HitContext hitContext) {\n }\n \n String parentId = getParentId(parentFieldMapper, hitContext.reader(), hitContext.docId());\n+ if (parentId == null) {\n+ // hit has no _parent field. Can happen for nested inner hits if parent hit is a p/c document.\n+ return;\n+ }\n+\n Map<String, SearchHitField> fields = hitContext.hit().fieldsOrNull();\n if (fields == null) {\n fields = new HashMap<>();\n@@ -59,8 +64,7 @@ public static String getParentId(ParentFieldMapper fieldMapper, LeafReader reade\n try {\n SortedDocValues docValues = reader.getSortedDocValues(fieldMapper.name());\n BytesRef parentId = docValues.get(docId);\n- assert parentId.length > 0;\n- return parentId.utf8ToString();\n+ return parentId.length > 0 ? parentId.utf8ToString() : null;\n } catch (IOException e) {\n throw ExceptionsHelper.convertToElastic(e);\n }", "filename": "core/src/main/java/org/elasticsearch/search/fetch/subphase/ParentFieldSubFetchPhase.java", "status": "modified" }, { "diff": "@@ -995,4 +995,21 @@ public void testNestedSourceFiltering() throws Exception {\n equalTo(\"fox ate rabbit x y z\"));\n }\n \n+ public void testNestedInnerHitWrappedInParentChildInnerhit() throws Exception {\n+ assertAcked(prepareCreate(\"test\").addMapping(\"child_type\", \"_parent\", \"type=parent_type\", \"nested_type\", \"type=nested\"));\n+ client().prepareIndex(\"test\", \"parent_type\", \"1\").setSource(\"key\", \"value\").get();\n+ client().prepareIndex(\"test\", \"child_type\", \"2\").setParent(\"1\").setSource(\"nested_type\", Collections.singletonMap(\"key\", \"value\"))\n+ .get();\n+ refresh();\n+ SearchResponse response = client().prepareSearch(\"test\")\n+ .setQuery(boolQuery().must(matchQuery(\"key\", \"value\"))\n+ .should(hasChildQuery(\"child_type\", nestedQuery(\"nested_type\", matchAllQuery(), ScoreMode.None)\n+ .innerHit(new InnerHitBuilder()), ScoreMode.None).innerHit(new InnerHitBuilder())))\n+ .get();\n+ assertHitCount(response, 1);\n+ SearchHit hit = response.getHits().getAt(0);\n+ assertThat(hit.getInnerHits().get(\"child_type\").getAt(0).field(\"_parent\").getValue(), equalTo(\"1\"));\n+ assertThat(hit.getInnerHits().get(\"child_type\").getAt(0).getInnerHits().get(\"nested_type\").getAt(0).field(\"_parent\"), nullValue());\n+ }\n+\n }", "filename": "core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java", "status": "modified" } ] }
{ "body": "Start with 2.4.0:\r\n\r\n```\r\nPUT my_index\r\n{\r\n \"mappings\": {\r\n \"my_type\": {\r\n \"_ttl\": {\r\n \"enabled\": true\r\n }\r\n }\r\n }\r\n}\r\n```\r\n\r\nUpgrade to 5.0.0:\r\n\r\n```\r\nPUT my_index/my_type/1?ttl=1s\r\n{\r\n \"text\": \"Will expire in 10 minutes\"\r\n}\r\n\r\n\r\nGET my_index/_search\r\n```\r\n\r\nReturns:\r\n\r\n```\r\n{\r\n \"took\": 34,\r\n \"timed_out\": false,\r\n \"_shards\": {\r\n \"total\": 5,\r\n \"successful\": 4,\r\n \"failed\": 1,\r\n \"failures\": [\r\n {\r\n \"shard\": 3,\r\n \"index\": \"my_index\",\r\n \"node\": \"AFATqx6CR6SPvGWQ0DaxvA\",\r\n \"reason\": {\r\n \"type\": \"illegal_argument_exception\",\r\n \"reason\": \"features that prevent cachability are disabled on this context\"\r\n }\r\n }\r\n ]\r\n },\r\n \"hits\": {\r\n \"total\": 1,\r\n \"max_score\": 1,\r\n \"hits\": []\r\n }\r\n} \r\n```\r\n\r\nRelated to https://github.com/elastic/elasticsearch/pull/21303", "comments": [ { "body": "Stacktrace:\n\n```\n[2016-11-10T09:42:48,016][DEBUG][o.e.a.s.TransportSearchAction] [AFATqx6] [3] Failed to execute fetch phase\norg.elasticsearch.transport.RemoteTransportException: [AFATqx6][127.0.0.1:9300][indices:data/read/search[phase/fetch/id]]\nCaused by: java.lang.IllegalArgumentException: features that prevent cachability are disabled on this context\n at org.elasticsearch.index.query.QueryShardContext.failIfFrozen(QueryShardContext.java:396) ~[elasticsearch-5.0.1-SNAPSHOT.jar:5.0.1-SNAPSHOT]\n at org.elasticsearch.index.query.QueryShardContext.nowInMillis(QueryShardContext.java:278) ~[elasticsearch-5.0.1-SNAPSHOT.jar:5.0.1-SNAPSHOT]\n at org.elasticsearch.index.mapper.TTLFieldMapper$TTLFieldType.valueForSearch(TTLFieldMapper.java:146) ~[elasticsearch-5.0.1-SNAPSHOT.jar:5.0.1-SNAPSHOT]\n at org.elasticsearch.index.fieldvisitor.FieldsVisitor.postProcess(FieldsVisitor.java:93) ~[elasticsearch-5.0.1-SNAPSHOT.jar:5.0.1-SNAPSHOT]\n at org.elasticsearch.search.fetch.FetchPhase.createSearchHit(FetchPhase.java:187) ~[elasticsearch-5.0.1-SNAPSHOT.jar:5.0.1-SNAPSHOT]\n at org.elasticsearch.search.fetch.FetchPhase.execute(FetchPhase.java:152) ~[elasticsearch-5.0.1-SNAPSHOT.jar:5.0.1-SNAPSHOT]\n at org.elasticsearch.search.SearchService.executeFetchPhase(SearchService.java:474) ~[elasticsearch-5.0.1-SNAPSHOT.jar:5.0.1-SNAPSHOT]\n at org.elasticsearch.action.search.SearchTransportService.lambda$registerRequestHandler$684(SearchTransportService.java:311) ~[elasticsearch-5.0.1-SNAPSHOT.jar:5.0.1-SNAPSHOT]\n at org.elasticsearch.transport.TransportRequestHandler.messageReceived(TransportRequestHandler.java:33) ~[elasticsearch-5.0.1-SNAPSHOT.jar:5.0.1-SNAPSHOT]\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:69) ~[elasticsearch-5.0.1-SNAPSHOT.jar:5.0.1-SNAPSHOT]\n at org.elasticsearch.transport.TransportService$6.doRun(TransportService.java:548) [elasticsearch-5.0.1-SNAPSHOT.jar:5.0.1-SNAPSHOT]\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:520) [elasticsearch-5.0.1-SNAPSHOT.jar:5.0.1-SNAPSHOT]\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-5.0.1-SNAPSHOT.jar:5.0.1-SNAPSHOT]\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_73]\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_73]\n at java.lang.Thread.run(Thread.java:745) [?:1.8.0_73]\n```\n", "created_at": "2016-11-10T08:48:20Z" 
}, { "body": "Does ttl available in ES 5.x ?", "created_at": "2016-12-01T20:39:51Z" }, { "body": "@clintongormley this is fixed no?", "created_at": "2016-12-09T15:06:00Z" }, { "body": "Closed by https://github.com/elastic/elasticsearch/pull/21493", "created_at": "2016-12-12T14:58:25Z" }, { "body": "@clintongormley sorry for duplicating question is ttl available in ES 5.x ?", "created_at": "2016-12-12T15:00:19Z" }, { "body": "@Hronom only for bwc, not for new indices", "created_at": "2016-12-12T15:03:47Z" } ], "number": 21457, "title": "TTL fails with `features that prevent cachability are disabled on this context`" }
{ "body": "This applies the same stance as 5.1 and 6.0 branches that we can just use System.currentTimeMillis() when returning the value for expiration in the fetch phase instead of getting he value of now from the frozen context which is now not possible.\r\n\r\nCloses #21457", "number": 21493, "review_comments": [], "title": "Fixes cachability problems with fetching TTL values when searching" }
{ "commits": [ { "message": "Fixes cachability problems with fetching TTL values when searching\n\nThis applies the same stance as 5.1 and 6.0 branches that we can\njust use System.currentTimeMillis() when returning the value for\nexpiration in the fetch phase instead of getting he value of now\nfrom the frozen context which is now not possible.\n\nCloses #21457" } ], "files": [ { "diff": "@@ -28,8 +28,6 @@\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentParser;\n import org.elasticsearch.index.AlreadyExpiredException;\n-import org.elasticsearch.search.internal.SearchContext;\n-\n import java.io.IOException;\n import java.util.Date;\n import java.util.Iterator;\n@@ -140,13 +138,7 @@ public TTLFieldType clone() {\n // Overrides valueForSearch to display live value of remaining ttl\n @Override\n public Object valueForSearch(Object value) {\n- long now;\n- SearchContext searchContext = SearchContext.current();\n- if (searchContext != null) {\n- now = searchContext.getQueryShardContext().nowInMillis();\n- } else {\n- now = System.currentTimeMillis();\n- }\n+ long now = System.currentTimeMillis();\n Long val = (Long) super.valueForSearch(value);\n return val - now;\n }", "filename": "core/src/main/java/org/elasticsearch/index/mapper/TTLFieldMapper.java", "status": "modified" }, { "diff": "@@ -26,6 +26,7 @@\n import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;\n import org.elasticsearch.action.get.GetResponse;\n import org.elasticsearch.action.index.IndexResponse;\n+import org.elasticsearch.action.search.SearchResponse;\n import org.elasticsearch.action.update.UpdateRequestBuilder;\n import org.elasticsearch.action.update.UpdateResponse;\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n@@ -48,6 +49,7 @@\n import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;\n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;\n+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;\n import static org.hamcrest.Matchers.both;\n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.greaterThan;\n@@ -302,4 +304,25 @@ private void assertTTLMappingEnabled(String index, String type) throws IOExcepti\n assertThat(ttlAsString, is(notNullValue()));\n assertThat(errMsg, ttlAsString, is(\"{enabled=true}\"));\n }\n+\n+ // Test for #21457\n+ public void testSearchWithTTL() throws Exception {\n+ assertAcked(prepareCreate(\"test\")\n+ .setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_3_0.id)\n+ .addMapping(\"type1\", XContentFactory.jsonBuilder()\n+ .startObject()\n+ .startObject(\"type1\")\n+ .startObject(\"_ttl\").field(\"enabled\", true).endObject()\n+ .endObject()\n+ .endObject()));\n+\n+ long providedTTLValue = 300000;\n+ IndexResponse indexResponse = client().prepareIndex(\"test\", \"type1\", \"1\").setSource(\"field1\", \"value1\")\n+ .setTTL(providedTTLValue).setRefreshPolicy(IMMEDIATE).get();\n+ assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());\n+\n+ SearchResponse searchResponse = client().prepareSearch(\"test\").get();\n+ assertSearchResponse(searchResponse);\n+ assertEquals(1L, searchResponse.getHits().getTotalHits());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/ttl/SimpleTTLIT.java", "status": "modified" } ] }
{ "body": "Got a 5.0.0-cluster logging this and failing to take snapshots. It should probably overwrite or gracefully handle the fact that he file exists.\r\n\r\n```\r\n[2016-11-10T13:04:33,885][WARN ][org.elasticsearch.snapshots.SnapshotsService] [found-snapshots:scheduled-1478783056-instance-0000000006/BWJzkt8QS6WPFBT8D5Ox4w] failed to finalize snapshot\r\norg.elasticsearch.repositories.RepositoryException: [found-snapshots] failed to update snapshot in repository\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository.finalizeSnapshot(BlobStoreRepository.java:544) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n\tat org.elasticsearch.snapshots.SnapshotsService$5.run(SnapshotsService.java:878) [elasticsearch-5.0.0.jar:5.0.0]\r\n\tat org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:444) [elasticsearch-5.0.0.jar:5.0.0]\r\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_72]\r\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_72]\r\n\tat java.lang.Thread.run(Thread.java:745) [?:1.8.0_72]\r\nCaused by: java.nio.file.FileAlreadyExistsException: blob [pending-index-293] already exists, cannot overwrite\r\n\tat org.elasticsearch.cloud.aws.blobstore.S3BlobContainer.writeBlob(S3BlobContainer.java:105) ~[?:?]\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository.writeAtomic(BlobStoreRepository.java:872) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository.writeIndexGen(BlobStoreRepository.java:782) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n\tat org.elasticsearch.repositories.blobstore.BlobStoreRepository.finalizeSnapshot(BlobStoreRepository.java:540) ~[elasticsearch-5.0.0.jar:5.0.0]\r\n\t... 5 more\r\n```", "comments": [ { "body": "@alexbrasetvik do you have additional logs you can share from the node? this looks like a genuine issue - we purposely prevented overwriting of blobs and reworked snapshots to use UUIDs so we don't have to worry about overwrites, so this is likely a bug related to that. \n", "created_at": "2016-11-10T13:39:02Z" } ], "number": 21462, "title": "Snapshots continuously failing with blob [pending-index-293] already exists, cannot overwrite" }
{ "body": "This PR ensures pending `index-*` blobs are deleted when snapshotting. The\r\n`index-*` blobs are generational files that maintain the snapshots\r\nin the repository. To write these atomically, we first write a\r\n`pending-index-*` blob, then move it to `index-*`, which also deletes\r\n`pending-index-*` in case its not a file-system level move (e.g.\r\nS3 repositories) . For example, to write the 5th generation of the\r\nindex blob for the repository, we would first write the bytes to\r\n`pending-index-5` and then move `pending-index-5` to `index-5`. It is\r\npossible that we fail after writing `pending-index-5`, but before\r\nmoving it to `index-5` or deleting `pending-index-5`. In this case,\r\nwe will have a dangling `pending-index-5` blob laying around. Since\r\nsnapshot number 5 would have failed, the next snapshot assumes a generation\r\nnumber of 5, so it tries to write to `index-5`, which first tries to\r\nwrite to `pending-index-5` before moving the blob to `index-5`. Since\r\n`pending-index-5` is leftover from the previous failure, the snapshot\r\nfails as it cannot overwrite this blob.\r\n\r\nThis commit solves the problem by first, adding a UUID to the\r\n`pending-index-*` blobs, and secondly, strengthen the logic around\r\nfailure to write the `index-*` generational blob to ensure pending\r\nfiles are deleted on cleanup.\r\n\r\nCloses #21462", "number": 21469, "review_comments": [ { "body": "I wonder if it's nicer to append the random uuid.\n", "created_at": "2016-11-10T15:48:54Z" }, { "body": "`exceptionToThrow` can never be null?\nMaybe it's easier to just do the `ex.addSuppressed(...)`?\n", "created_at": "2016-11-10T15:50:50Z" }, { "body": "I don't think we should special-case this. Depending on underlying blobcontainer, a different exception might be thrown.\n", "created_at": "2016-11-10T15:57:37Z" }, { "body": "Sure, done\n", "created_at": "2016-11-10T16:00:56Z" }, { "body": "Its part of the contract of the `BlobContainer` to throw a `NoSuchFileException` if the blob doesn't exist on calling `deleteBlob`, and we changed all implementations to adhere to this contract. That said, you are right, I went through two iterations of this, and now the only way we get to this block is if an exception was thrown, in which case we don't care about the nature of the exception here. Same with below\n", "created_at": "2016-11-10T16:04:04Z" }, { "body": "is can\n", "created_at": "2016-11-10T22:48:10Z" }, { "body": "maybe readd comment here (temp file creation or move failed - clean up)\n", "created_at": "2016-11-10T22:49:47Z" }, { "body": "`super.readBlock` instead of `readBlock` to prevent double `maybeIOExceptionOrBlock`.\n", "created_at": "2016-11-10T22:52:00Z" }, { "body": "maybe add some randomization to the random_control_io_exception_rate?\n", "created_at": "2016-11-10T22:53:13Z" }, { "body": "can we catch a more specific exception type? 
IOException with the `Random IOException` string?\n", "created_at": "2016-11-10T22:55:27Z" }, { "body": "good catch\n", "created_at": "2016-11-11T02:03:44Z" }, { "body": "The issue here is that either `SnapshotCreationException` or `RepositoryException` can be thrown, but what we can do here is ensure the stack trace has the `Random IOException` in the nested stack trace.\n", "created_at": "2016-11-11T02:41:32Z" }, { "body": "can be specified as `catch (SnapshotCreationException | RepositoryException ex)` in Java ;-)\n", "created_at": "2016-11-11T08:47:36Z" }, { "body": "sorry, forgot to add here that we could randomize between atomic and non-atomic move.\n", "created_at": "2016-11-11T08:48:29Z" }, { "body": "I pushed adb7aaded4ee9cfe19dd59781b3d85e570df9a57 to randomize between atomic and non-atomic\n", "created_at": "2016-11-11T14:15:46Z" } ], "title": "Ensures cleanup of temporary index-* generational blobs during snapshotting" }
{ "commits": [ { "message": "Ensures pending index-* blobs are deleted when snapshotting. The\nindex-* blobs are generational files that maintain the snapshots\nin the repository. To write these atomically, we first write a\n`pending-index-*` blob, then move it to `index-*`, which also deletes\n`pending-index-*` in case its not a file-system level move (e.g.\nS3 repositories) . For example, to write the 5th generation of the\nindex blob for the repository, we would first write the bytes to\n`pending-index-5` and then move `pending-index-5` to `index-5`. It is\npossible that we fail after writing `pending-index-5`, but before\nmoving it to `index-5` or deleting `pending-index-5`. In this case,\nwe will have a dangling `pending-index-5` blob laying around. Since\nsnapshot #5 would have failed, the next snapshot assumes a generation\nnumber of 5, so it tries to write to `index-5`, which first tries to\nwrite to `pending-index-5` before moving the blob to `index-5`. Since\n`pending-index-5` is leftover from the previous failure, the snapshot\nfails as it cannot overwrite this blob.\n\nThis commit solves the problem by first, adding a UUID to the\n`pending-index-*` blobs, and secondly, strengthen the logic around\nfailure to write the `index-*` generational blob to ensure pending\nfiles are deleted on cleanup.\n\nCloses #21462" }, { "message": "improved exception handling" }, { "message": "update javadocs to reflect move is not always atomic" }, { "message": "add test" }, { "message": "address comments" } ], "files": [ { "diff": "@@ -105,8 +105,11 @@ public interface BlobContainer {\n Map<String, BlobMetaData> listBlobsByPrefix(String blobNamePrefix) throws IOException;\n \n /**\n- * Atomically renames the source blob into the target blob. If the source blob does not exist or the\n- * target blob already exists, an exception is thrown.\n+ * Renames the source blob into the target blob. If the source blob does not exist or the\n+ * target blob already exists, an exception is thrown. Atomicity of the move operation\n+ * can only be guaranteed on an implementation-by-implementation basis. 
The only current\n+ * implementation of {@link BlobContainer} for which atomicity can be guaranteed is the\n+ * {@link org.elasticsearch.common.blobstore.fs.FsBlobContainer}.\n *\n * @param sourceBlobName\n * The blob to rename.", "filename": "core/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java", "status": "modified" }, { "diff": "@@ -867,15 +867,17 @@ private long listBlobsToGetLatestIndexId() throws IOException {\n }\n \n private void writeAtomic(final String blobName, final BytesReference bytesRef) throws IOException {\n- final String tempBlobName = \"pending-\" + blobName;\n+ final String tempBlobName = \"pending-\" + blobName + \"-\" + UUIDs.randomBase64UUID();\n try (InputStream stream = bytesRef.streamInput()) {\n snapshotsBlobContainer.writeBlob(tempBlobName, stream, bytesRef.length());\n- }\n- try {\n snapshotsBlobContainer.move(tempBlobName, blobName);\n } catch (IOException ex) {\n- // Move failed - try cleaning up\n- snapshotsBlobContainer.deleteBlob(tempBlobName);\n+ // temporary blob creation or move failed - try cleaning up\n+ try {\n+ snapshotsBlobContainer.deleteBlob(tempBlobName);\n+ } catch (IOException e) {\n+ ex.addSuppressed(e);\n+ }\n throw ex;\n }\n }", "filename": "core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java", "status": "modified" }, { "diff": "@@ -2672,4 +2672,53 @@ public void testSnapshotCanceledOnRemovedShard() throws Exception {\n assertEquals(\"IndexShardSnapshotFailedException[Aborted]\", snapshotInfo.shardFailures().get(0).reason());\n }\n \n+ public void testSnapshotSucceedsAfterSnapshotFailure() throws Exception {\n+ logger.info(\"--> creating repository\");\n+ final Path repoPath = randomRepoPath();\n+ assertAcked(client().admin().cluster().preparePutRepository(\"test-repo\").setType(\"mock\").setVerify(false).setSettings(\n+ Settings.builder().put(\"location\", repoPath).put(\"random_control_io_exception_rate\", randomIntBetween(5, 20) / 100f)));\n+\n+ logger.info(\"--> indexing some data\");\n+ createIndex(\"test-idx\");\n+ ensureGreen();\n+ final int numDocs = randomIntBetween(1, 5);\n+ for (int i = 0; i < numDocs; i++) {\n+ index(\"test-idx\", \"doc\", Integer.toString(i), \"foo\", \"bar\" + i);\n+ }\n+ refresh();\n+ assertThat(client().prepareSearch(\"test-idx\").setSize(0).get().getHits().totalHits(), equalTo((long) numDocs));\n+\n+ logger.info(\"--> snapshot with potential I/O failures\");\n+ try {\n+ CreateSnapshotResponse createSnapshotResponse =\n+ client().admin().cluster().prepareCreateSnapshot(\"test-repo\", \"test-snap\")\n+ .setWaitForCompletion(true)\n+ .setIndices(\"test-idx\")\n+ .get();\n+ if (createSnapshotResponse.getSnapshotInfo().totalShards() != createSnapshotResponse.getSnapshotInfo().successfulShards()) {\n+ assertThat(getFailureCount(\"test-repo\"), greaterThan(0L));\n+ assertThat(createSnapshotResponse.getSnapshotInfo().shardFailures().size(), greaterThan(0));\n+ for (SnapshotShardFailure shardFailure : createSnapshotResponse.getSnapshotInfo().shardFailures()) {\n+ assertThat(shardFailure.reason(), containsString(\"Random IOException\"));\n+ }\n+ }\n+ } catch (Exception ex) {\n+ // sometimes, the snapshot will fail with a top level I/O exception\n+ assertThat(ExceptionsHelper.stackTrace(ex), containsString(\"Random IOException\"));\n+ }\n+\n+ logger.info(\"--> snapshot with no I/O failures\");\n+ assertAcked(client().admin().cluster().preparePutRepository(\"test-repo-2\").setType(\"mock\").setVerify(false).setSettings(\n+ Settings.builder().put(\"location\", 
repoPath)));\n+ CreateSnapshotResponse createSnapshotResponse =\n+ client().admin().cluster().prepareCreateSnapshot(\"test-repo-2\", \"test-snap-2\")\n+ .setWaitForCompletion(true)\n+ .setIndices(\"test-idx\")\n+ .get();\n+ assertEquals(0, createSnapshotResponse.getSnapshotInfo().failedShards());\n+ GetSnapshotsResponse getSnapshotsResponse = client().admin().cluster().prepareGetSnapshots(\"test-repo-2\")\n+ .addSnapshots(\"test-snap-2\").get();\n+ assertEquals(SnapshotState.SUCCESS, getSnapshotsResponse.getSnapshots().get(0).state());\n+ }\n+\n }", "filename": "core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java", "status": "modified" }, { "diff": "@@ -321,14 +321,20 @@ public Map<String, BlobMetaData> listBlobsByPrefix(String blobNamePrefix) throws\n \n @Override\n public void move(String sourceBlob, String targetBlob) throws IOException {\n+ // simulate a non-atomic move, since many blob container implementations\n+ // will not have an atomic move, and we should be able to handle that\n maybeIOExceptionOrBlock(targetBlob);\n- super.move(sourceBlob, targetBlob);\n+ super.writeBlob(targetBlob, super.readBlob(sourceBlob), 0L);\n+ super.deleteBlob(sourceBlob);\n }\n \n @Override\n public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {\n maybeIOExceptionOrBlock(blobName);\n super.writeBlob(blobName, inputStream, blobSize);\n+ // for network based repositories, the blob may have been written but we may still\n+ // get an error with the client connection, so an IOException here simulates this\n+ maybeIOExceptionOrBlock(blobName);\n }\n }\n }", "filename": "core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java", "status": "modified" } ] }
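Stripped of the repository plumbing, the fix in this record combines two ideas that are easy to miss in the diff: the temporary blob now carries a random suffix, so a `pending-index-*` file left behind by a crashed snapshot can never collide with the next attempt, and a failed cleanup is attached as a suppressed exception instead of hiding the original failure. Below is a compilable sketch of that write-then-move pattern against a deliberately minimal, hypothetical `BlobContainer` interface (the real Elasticsearch interface is richer, and its `move` is only atomic for filesystem repositories):

```java
import java.io.IOException;
import java.io.InputStream;
import java.util.UUID;

// Stripped-down stand-in for org.elasticsearch.common.blobstore.BlobContainer,
// kept minimal so this sketch compiles on its own.
interface BlobContainer {
    void writeBlob(String name, InputStream in, long length) throws IOException;
    void move(String source, String target) throws IOException; // not necessarily atomic
    void deleteBlob(String name) throws IOException;
}

final class AtomicBlobWriter {
    /**
     * Writes a blob by first writing a uniquely named temporary blob and then
     * moving it into place. The UUID suffix means a leftover temporary blob from
     * a previous failed attempt can never block a later attempt, and the catch
     * block makes a best effort to clean the temporary blob up without losing
     * the original exception.
     */
    static void writeAtomic(BlobContainer container, String blobName, InputStream in, long length) throws IOException {
        final String tempBlobName = "pending-" + blobName + "-" + UUID.randomUUID();
        try {
            container.writeBlob(tempBlobName, in, length);
            container.move(tempBlobName, blobName);
        } catch (IOException ex) {
            try {
                container.deleteBlob(tempBlobName); // temporary write or move failed - clean up
            } catch (IOException cleanupFailure) {
                ex.addSuppressed(cleanupFailure);
            }
            throw ex;
        }
    }
}
```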
{ "body": "**Elasticsearch version**: 2.4.1 \nUsed to work in 2.3.1.\n\n**Plugins installed**: none\n\n**JVM version**: java version \"1.8.0_101\"\nJava(TM) SE Runtime Environment (build 1.8.0_101-b13)\nJava HotSpot(TM) 64-Bit Server VM (build 25.101-b13, mixed mode)\n\n**OS version**: Fedora 24\n\n**Description of the problem including expected versus actual behavior**:\n\nParsing of \"ZZZ\" component in date formats seems to fail for some formats. I don't know what exactly triggers the issue, but having a literal just before the \"ZZZ\" component seems to be enough.\n\nI managed to reproduce the bug in Joda-time 2.9.4 directly. There seems to be a [fix in master](https://github.com/yrodiere/joda-time/commit/eaaff1d0687fc86a745ca8f3ea0b069b3cf54233), but it has not been released yet.\n\nThe next release of Elasticsearch on the 2.4 branch should upgrade the Joda dependency to fix this bug.\n\n**Steps to reproduce**:\n1. Launch an Elasticsearch node on localhost:9200\n2. Execute the following bash script:\n\n```\ncurl -XPUT 'localhost:9200/testindex/?pretty'\ncurl -XPUT 'localhost:9200/testindex/testtype/_mapping?pretty' -d'{\"properties\":{\"zonedDate\":{\"type\":\"date\",\"store\":true,\"format\":\"yyyy-MM-dd'['ZZZ']'\"}}}'\ncurl -XPUT 'localhost:9200/testindex/testtype/1?pretty' -d'{\"zonedDate\":\"2016-10-13[CET]\"}'\n```\n\n**Provide logs (if relevant)**:\nElasticsearch response for the last command:\n\n```\n{\n \"error\" : {\n \"root_cause\" : [ {\n \"type\" : \"mapper_parsing_exception\",\n \"reason\" : \"failed to parse [zonedDate]\"\n } ],\n \"type\" : \"mapper_parsing_exception\",\n \"reason\" : \"failed to parse [zonedDate]\",\n \"caused_by\" : {\n \"type\" : \"illegal_argument_exception\",\n \"reason\" : \"Invalid format: \\\"2016-10-13[CET]\\\" is malformed at \\\"CET]\\\"\"\n }\n },\n \"status\" : 400\n}\n```\n", "comments": [ { "body": "Hello,\n\nIt seems Joda Time 2.9.5 is out. I just did a quick test, and overriding the version of the library to 2.9.5 seems to fix the issue on Elasticsearch 2.4.1: running the set of commands I gave above will not fail anymore.\nIt would probably also fix 5.0.0 (which is also affected by this bug, by the way).\n\nAny chance to see this in the next hotfix releases for 2.x and 5.x?\n", "created_at": "2016-11-10T08:22:20Z" }, { "body": "@tlrx do you know if there will be another maintenance release for 2.4.x? It would be nice to get it fixed for 2.4.x too.\n\nThanks!\n", "created_at": "2016-11-10T16:45:50Z" }, { "body": "@gsmet I'm backporting it to 2.4.x right now so if there's another maintenance release it will be part of it.\n", "created_at": "2016-11-10T16:51:42Z" }, { "body": "@tlrx thanks! FYI, we can't support ZonedDateTime in our Hibernate Search-Elasticsearch integration at the moment because of this bug and we do not support 5 yet - it's in project. It would be nice to get a 2.4.x release without this issue!\n", "created_at": "2016-11-10T17:05:04Z" } ], "number": 20911, "title": "Parsing of \"ZZZ\" component in date formats fails" }
{ "body": "This commit updates JodaTime to version 2.9.5 that contain a fix for a bug when parsing time zones (see https://github.com/JodaOrg/joda-time/pull/332, https://github.com/JodaOrg/joda-time/issues/386 and https://github.com/JodaOrg/joda-time/issues/373).\r\n\r\nIt also remove the joda-convert dependency that seems to be unused.\r\n \r\ncloses #20911\r\n\r\nHere is the changelog for 2.9.5:\r\n```\r\nChanges in 2.9.5\r\n----------------\r\n - Add Norwegian period translations [#378]\r\n\r\n - Add Duration.dividedBy(long,RoundingMode) [#69, #379]\r\n\r\n - DateTimeZone data updated to version 2016i\r\n\r\n - Fixed bug where clock read twice when comparing two nulls in DateTimeComparator [#404]\r\n\r\n - Fixed minor issues with historic time-zone data [#373]\r\n\r\n - Fix bug in time-zone binary search [#332, #386]\r\n The fix in v2.9.2 caused problems when the time-zone being parsed\r\n was not the last element in the input string. New approach uses a\r\n different approach to the problem.\r\n\r\n - Update tests for JDK 9 [#394]\r\n\r\n - Close buffered reader correctly in zone info compiler [#396]\r\n\r\n - Handle locale correctly zone info compiler [#397]\r\n```\r\n \r\n", "number": 21468, "review_comments": [], "title": "Update Joda Time to version 2.9.5" }
{ "commits": [ { "message": "Update Joda Time to version 2.9.5\n\nThis commit updates JodaTime to version 2.9.5 that contain a fix for a bug when parsing time zonesi (see https://github.com/yrodiere/joda-time/commit/eaaff1d0687fc86a745ca8f3ea0b069b3cf54233 or https://github.com/JodaOrg/joda-time/issues/373).\n\nIt also remove the joda-convert dependency that seems to be unused.\n\ncloses #20911" } ], "files": [ { "diff": "@@ -62,10 +62,7 @@ dependencies {\n compile 'com.carrotsearch:hppc:0.7.1'\n \n // time handling, remove with java 8 time\n- compile 'joda-time:joda-time:2.9.4'\n- // joda 2.0 moved to using volatile fields for datetime\n- // When updating to a new version, make sure to update our copy of BaseDateTime\n- compile 'org.joda:joda-convert:1.2'\n+ compile 'joda-time:joda-time:2.9.5'\n \n // json and yaml\n compile \"org.yaml:snakeyaml:${versions.snakeyaml}\"", "filename": "core/build.gradle", "status": "modified" }, { "diff": "@@ -0,0 +1 @@\n+5f01da7306363fad2028b916f3eab926262de928\n\\ No newline at end of file", "filename": "core/licenses/joda-time-2.9.5.jar.sha1", "status": "added" }, { "diff": "@@ -30,6 +30,8 @@\n import org.joda.time.DateTime;\n import org.joda.time.DateTimeConstants;\n import org.joda.time.DateTimeZone;\n+import org.joda.time.format.DateTimeFormat;\n+import org.joda.time.format.DateTimeFormatter;\n import org.joda.time.format.ISODateTimeFormat;\n \n import java.util.ArrayList;\n@@ -41,6 +43,7 @@\n import static org.hamcrest.Matchers.greaterThanOrEqualTo;\n import static org.hamcrest.Matchers.lessThan;\n import static org.hamcrest.Matchers.lessThanOrEqualTo;\n+import static org.hamcrest.Matchers.startsWith;\n \n public class TimeZoneRoundingTests extends ESTestCase {\n \n@@ -511,6 +514,25 @@ public void testEdgeCasesTransition() {\n }\n }\n \n+ /**\n+ * Test that time zones are correctly parsed. 
There is a bug with\n+ * Joda 2.9.4 (see https://github.com/JodaOrg/joda-time/issues/373)\n+ */\n+ public void testsTimeZoneParsing() {\n+ final DateTime expected = new DateTime(2016, 11, 10, 5, 37, 59, randomDateTimeZone());\n+\n+ // Formatter used to print and parse the sample date.\n+ // Printing the date works but parsing it back fails\n+ // with Joda 2.9.4\n+ DateTimeFormatter formatter = DateTimeFormat.forPattern(\"YYYY-MM-dd'T'HH:mm:ss \" + randomFrom(\"ZZZ\", \"[ZZZ]\", \"'['ZZZ']'\"));\n+\n+ String dateTimeAsString = formatter.print(expected);\n+ assertThat(dateTimeAsString, startsWith(\"2016-11-10T05:37:59 \"));\n+\n+ DateTime parsedDateTime = formatter.parseDateTime(dateTimeAsString);\n+ assertThat(parsedDateTime.getZone(), equalTo(expected.getZone()));\n+ }\n+\n private static void assertInterval(long rounded, long nextRoundingValue, Rounding rounding, int minutes,\n DateTimeZone tz) {\n assertInterval(rounded, dateBetween(rounded, nextRoundingValue), nextRoundingValue, rounding, tz);", "filename": "core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java", "status": "modified" }, { "diff": "@@ -25,19 +25,15 @@\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.compress.CompressedXContent;\n import org.elasticsearch.common.settings.Settings;\n-import org.elasticsearch.common.xcontent.ToXContent;\n-import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.index.IndexService;\n-import org.elasticsearch.index.mapper.DocumentMapper;\n-import org.elasticsearch.index.mapper.DocumentMapperParser;\n-import org.elasticsearch.index.mapper.FieldMapper;\n-import org.elasticsearch.index.mapper.MapperParsingException;\n-import org.elasticsearch.index.mapper.ParsedDocument;\n import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.test.ESSingleNodeTestCase;\n import org.elasticsearch.test.InternalSettingsPlugin;\n import org.elasticsearch.test.VersionUtils;\n+import org.joda.time.DateTime;\n+import org.joda.time.DateTimeZone;\n+import org.joda.time.format.DateTimeFormat;\n import org.junit.Before;\n \n import java.io.IOException;\n@@ -354,4 +350,39 @@ public void testEmptyName() throws IOException {\n DocumentMapper defaultMapper = parser.parse(\"type\", new CompressedXContent(mapping));\n assertEquals(mapping, defaultMapper.mappingSource().toString());\n }\n+\n+ /**\n+ * Test that time zones are correctly parsed by the {@link DateFieldMapper}.\n+ * There is a known bug with Joda 2.9.4 reported in https://github.com/JodaOrg/joda-time/issues/373.\n+ */\n+ public void testTimeZoneParsing() throws Exception {\n+ final String timeZonePattern = \"yyyy-MM-dd\" + randomFrom(\"ZZZ\", \"[ZZZ]\", \"'['ZZZ']'\");\n+\n+ String mapping = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"type\")\n+ .startObject(\"properties\")\n+ .startObject(\"field\")\n+ .field(\"type\", \"date\")\n+ .field(\"format\", timeZonePattern)\n+ .endObject()\n+ .endObject()\n+ .endObject().endObject().string();\n+\n+ DocumentMapper mapper = parser.parse(\"type\", new CompressedXContent(mapping));\n+ assertEquals(mapping, mapper.mappingSource().toString());\n+\n+ final DateTimeZone randomTimeZone = randomBoolean() ? 
DateTimeZone.forID(randomFrom(\"UTC\", \"CET\")) : randomDateTimeZone();\n+ final DateTime randomDate = new DateTime(2016, 03, 11, 0, 0, 0, randomTimeZone);\n+\n+ ParsedDocument doc = mapper.parse(\"test\", \"type\", \"1\", XContentFactory.jsonBuilder()\n+ .startObject()\n+ .field(\"field\", DateTimeFormat.forPattern(timeZonePattern).print(randomDate))\n+ .endObject()\n+ .bytes());\n+\n+ IndexableField[] fields = doc.rootDoc().getFields(\"field\");\n+ assertEquals(2, fields.length);\n+\n+ assertEquals(randomDate.withZone(DateTimeZone.UTC).getMillis(), fields[0].numericValue().longValue());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java", "status": "modified" } ] }
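The curl-based reproduction in the issue above boils down to a plain Joda-Time round-trip, which is also how the linked upstream reports describe it. The small standalone program below (class name invented for the example) prints a date whose pattern puts a literal bracket before the `ZZZ` zone component and then parses it back; with joda-time 2.9.4 on the classpath the parse step fails with a "malformed at" error like the one the mapper surfaces, while 2.9.5 is reported to round-trip cleanly:

```java
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

// Round-trips a date whose format carries a named time zone id wrapped in literal
// brackets, mirroring the mapping from the issue report.
public class ZoneLiteralRoundTrip {
    public static void main(String[] args) {
        DateTimeFormatter formatter = DateTimeFormat.forPattern("yyyy-MM-dd'['ZZZ']'");
        DateTime original = new DateTime(2016, 10, 13, 0, 0, 0, DateTimeZone.forID("CET"));

        String printed = formatter.print(original);         // e.g. "2016-10-13[CET]"
        System.out.println("printed: " + printed);

        // On joda-time 2.9.4 this throws IllegalArgumentException ("malformed at ...");
        // on 2.9.5 it parses and preserves the zone.
        DateTime parsed = formatter.parseDateTime(printed);
        System.out.println("parsed zone: " + parsed.getZone());
    }
}
```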
{ "body": "**Elasticsearch version**: 5.0.0 and master\r\n\r\n**OS version**: sysv init distros\r\n\r\n**Description of the problem including expected versus actual behavior**:\r\n\r\nThe addition of the `ES_JVM_OPTIONS` did not make it to [the debian init script packaging](https://github.com/elastic/elasticsearch/blob/master/distribution/deb/src/main/packaging/init.d/elasticsearch#L84-L86) or [the RHEL init script packaging](https://github.com/elastic/elasticsearch/blob/master/distribution/rpm/src/main/packaging/init.d/elasticsearch#L62-L65).\r\n\r\nIn the systemd scripts, all variables are passed through, but not in the init scripts. This blocks anyone from setting ES_JVM_OPTIONS to an alternate location using /etc/default/elasticsearch or /etc/sysconfig/elasticsearch. It also prevents any automation from using a non-standard location, and prevents anyone running multiple instances from using the existing init scripts to do it (unless they want a single /etc/elasticsearch/jvm.options file that they can't change).", "comments": [ { "body": "Thanks for reporting this, this is indeed a mistake. I will fix.\n", "created_at": "2016-11-02T18:37:51Z" }, { "body": "Hi @jasontedor \n\nCan we get any update on this?\n\nI am trying to install ES 5.0 using ansible elasticsearch git project and since I am getting the below error after I set the ES_JAVA_OPTS in the below file.\n\n**File location:- (Under ansible-elasticsearch/templates/elasticsearch.j2) and during playbook run file copied to the below path /etc/elasticsearch/master_node_elasticsearch**\n\n**Note:- I have attached my customized playbook**\n\n`fatal: [10.247.0.31]: FAILED! => {\"changed\": true, \"cmd\": [\"sudo\", \"service\", \"master_node_elasticsearch\", \"start\"], \"delta\": \"0:00:00.086526\", \"end\": \"2016-11-09 07:46:34.015391\", \"failed\": true, \"rc\": 1, \"start\": \"2016-11-09 07:46:33.928865\", \"stderr\": \"\", \"stdout\": \"Starting elasticsearch: Error: encountered environment variables that are no longer supported\\nUse jvm.options or ES_JAVA_OPTS to configure the JVM\\nES_HEAP_SIZE=1g: set -Xms1g and -Xmx1g in jvm.options or add \\\"-Xms1g -Xmx1g\\\" to ES_JAVA_OPTS\\n[FAILED]\", \"stdout_lines\": [\"Starting elasticsearch: Error: encountered environment variables that are no longer supported\", \"Use jvm.options or ES_JAVA_OPTS to configure the JVM\", \"ES_HEAP_SIZE=1g: set -Xms1g and -Xmx1g in jvm.options or add \\\"-Xms1g -Xmx1g\\\" to ES_JAVA_OPTS\", \"[FAILED]\"], \"warnings\": [\"Consider using 'become', 'become_method', and 'become_user' rather than running sudo\"]}\n`\nCorrect me know If I am doing any wrong in setting up ES_JAVA_OPTS.\n[rdaas-elasticsearch.zip](https://github.com/elastic/elasticsearch/files/580760/rdaas-elasticsearch.zip)\n", "created_at": "2016-11-09T13:03:43Z" }, { "body": "@GaneshbabuRamamoorthy Your problem is unrelated to the issue reported here; you are using an environment variable (`ES_HEAP_SIZE`) that is no longer supported.\n", "created_at": "2016-11-09T21:06:12Z" }, { "body": "Thanks for taking care of this one, @jasontedor! 👍 \n", "created_at": "2016-11-10T05:36:40Z" } ], "number": 21255, "title": "Packaging: /etc/init.d/elasticsearch doesn't pass through ES_JVM_OPTIONS" }
{ "body": "The environment variable ES_JVM_OPTIONS allows end-users to specify a\r\ncustom location for the jvm.options file. Unfortunately, this\r\nenvironment variable is not exported from the SysV init scripts. This\r\ncommit addresses this issue, and includes a test that ES_JVM_OPTIONS and\r\nES_JAVA_OPTS work for the SysV init packages.\r\n\r\nCloses #21255", "number": 21445, "review_comments": [ { "body": "I like it, thanks!\n", "created_at": "2016-11-10T08:56:48Z" }, { "body": "This has been adapted to work on ubuntu-1204 in https://github.com/elastic/elasticsearch/commit/3f7f8e4b97021e804d4ea21966fe4a13aca3d0bf\n", "created_at": "2016-11-10T10:12:49Z" }, { "body": "Thank you @tlrx!\n", "created_at": "2016-11-10T11:49:36Z" } ], "title": "Export ES_JVM_OPTIONS for SysV init" }
{ "commits": [ { "message": "Export ES_JVM_OPTIONS for SysV init\n\nThe environment variable ES_JVM_OPTIONS allows end-users to specify a\ncustom location for the jvm.options file. Unfortunately, this\nenvironment variable is not exported from the SysV init scripts. This\ncommit addresses this issue, and includes a test that ES_JVM_OPTIONS and\nES_JAVA_OPTS work for the SysV init packages." } ], "files": [ { "diff": "@@ -84,6 +84,7 @@ DAEMON_OPTS=\"-d -p $PID_FILE -Edefault.path.logs=$LOG_DIR -Edefault.path.data=$D\n export ES_JAVA_OPTS\n export JAVA_HOME\n export ES_INCLUDE\n+export ES_JVM_OPTIONS\n \n if [ ! -x \"$DAEMON\" ]; then\n \techo \"The elasticsearch startup script does not exists or it is not executable, tried: $DAEMON\"", "filename": "distribution/deb/src/main/packaging/init.d/elasticsearch", "status": "modified" }, { "diff": "@@ -60,9 +60,10 @@ prog=\"elasticsearch\"\n pidfile=\"$PID_DIR/${prog}.pid\"\n \n export ES_JAVA_OPTS\n-export ES_STARTUP_SLEEP_TIME\n export JAVA_HOME\n export ES_INCLUDE\n+export ES_JVM_OPTIONS\n+export ES_STARTUP_SLEEP_TIME\n \n lockfile=/var/lock/subsys/$prog\n ", "filename": "distribution/rpm/src/main/packaging/init.d/elasticsearch", "status": "modified" }, { "diff": "@@ -110,12 +110,12 @@ setup() {\n local temp=`mktemp -d`\n touch \"$temp/jvm.options\"\n chown -R elasticsearch:elasticsearch \"$temp\"\n- echo \"-Xms264m\" >> \"$temp/jvm.options\"\n- echo \"-Xmx264m\" >> \"$temp/jvm.options\"\n+ echo \"-Xms512m\" >> \"$temp/jvm.options\"\n+ echo \"-Xmx512m\" >> \"$temp/jvm.options\"\n export ES_JVM_OPTIONS=\"$temp/jvm.options\"\n export ES_JAVA_OPTS=\"-XX:-UseCompressedOops\"\n start_elasticsearch_service\n- curl -s -XGET localhost:9200/_nodes | fgrep '\"heap_init_in_bytes\":276824064'\n+ curl -s -XGET localhost:9200/_nodes | fgrep '\"heap_init_in_bytes\":536870912'\n curl -s -XGET localhost:9200/_nodes | fgrep '\"using_compressed_ordinary_object_pointers\":\"false\"'\n stop_elasticsearch_service\n export ES_JVM_OPTIONS=$es_jvm_options", "filename": "qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats", "status": "modified" }, { "diff": "@@ -134,6 +134,25 @@ setup() {\n [ \"$status\" -eq 3 ] || [ \"$status\" -eq 4 ]\n }\n \n+@test \"[INIT.D] start Elasticsearch with custom JVM options\" {\n+ local es_java_opts=$ES_JAVA_OPTS\n+ local es_jvm_options=$ES_JVM_OPTIONS\n+ local temp=`mktemp -d`\n+ touch \"$temp/jvm.options\"\n+ chown -R elasticsearch:elasticsearch \"$temp\"\n+ echo \"-Xms512m\" >> \"$temp/jvm.options\"\n+ echo \"-Xmx512m\" >> \"$temp/jvm.options\"\n+ cp /etc/sysconfig/elasticsearch \"$temp/elasticsearch\"\n+ echo \"ES_JVM_OPTIONS=\\\"$temp/jvm.options\\\"\" >> /etc/sysconfig/elasticsearch\n+ echo \"ES_JAVA_OPTS=\\\"-XX:-UseCompressedOops\\\"\" >> /etc/sysconfig/elasticsearch\n+ service elasticsearch start\n+ wait_for_elasticsearch_status\n+ curl -s -XGET localhost:9200/_nodes | fgrep '\"heap_init_in_bytes\":536870912'\n+ curl -s -XGET localhost:9200/_nodes | fgrep '\"using_compressed_ordinary_object_pointers\":\"false\"'\n+ service elasticsearch stop\n+ cp \"$temp/elasticsearch\" /etc/sysconfig/elasticsearch\n+}\n+\n # Simulates the behavior of a system restart:\n # the PID directory is deleted by the operating system\n # but it should not block ES from starting", "filename": "qa/vagrant/src/test/resources/packaging/scripts/70_sysv_initd.bats", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: 5.0.0\r\n**Plugins installed**: []\r\n**JVM version**: 1.8\r\n**OS version**: macOS 10.12.1\r\n\r\n**Description of the problem including expected versus actual behaviour**:\r\n\r\n`InternalSearchHit.hasSource()` seems to return incorrect values, and it's apparent why when you look at the source code:\r\n\r\n```\r\n @Override\r\n public boolean hasSource() {\r\n return source == null;\r\n }\r\n```\r\n\r\nThe `SearchFieldsIT.testScriptDocAndFields()` test also seems to be incorrect, expecting this wrong behaviour (since when specifying scripted fields, source is not returned unless explicitly requested) :\r\n\r\n`assertThat(response.getHits().getAt(0).hasSource(), equalTo(true));`\r\n\r\nLet me know if I've got this backwards, but I don't think so. At the very least if it's expected behaviour, the method name should change back to `isSourceEmpty()`\r\n\r\n", "comments": [ { "body": "@javanna could you take a look at this please?\n", "created_at": "2016-11-09T13:55:07Z" }, { "body": "thanks for reporting this @puug , we seem to have renamed the method a while ago but forgot to update its behaviour unfortunately :) I opened #21441 to fix this issue.\n", "created_at": "2016-11-09T21:17:38Z" } ], "number": 21419, "title": "InternalSearchHit.hasSource() returning incorrect value" }
{ "body": "The method used to be called isSourceEmpty, and was renamed to hasSource, but the return value never changed. Updated tests and users accordingly.\r\n\r\nCloses #21419", "number": 21441, "review_comments": [], "title": "Fix InternalSearchHit#hasSource to return the proper boolean value" }
{ "commits": [ { "message": "Fix InternalSearchHit#hasSource to return the proper boolean value\n\nThe method used to be called isSourceEmpty, and was renamed to hasSource, but the return value never changed. Updated tests and users accordingly.\n\nCloses #21419" } ], "files": [ { "diff": "@@ -249,7 +249,7 @@ public byte[] source() {\n \n @Override\n public boolean hasSource() {\n- return source == null;\n+ return source != null;\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java", "status": "modified" }, { "diff": "@@ -293,7 +293,7 @@ public void testScriptDocAndFields() throws Exception {\n assertNoFailures(response);\n \n assertThat(response.getHits().totalHits(), equalTo(3L));\n- assertThat(response.getHits().getAt(0).hasSource(), equalTo(true));\n+ assertFalse(response.getHits().getAt(0).hasSource());\n assertThat(response.getHits().getAt(0).id(), equalTo(\"1\"));\n Set<String> fields = new HashSet<>(response.getHits().getAt(0).fields().keySet());\n fields.remove(TimestampFieldMapper.NAME); // randomly enabled via templates", "filename": "core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java", "status": "modified" }, { "diff": "@@ -19,14 +19,14 @@\n \n package org.elasticsearch.search.internal;\n \n+import org.elasticsearch.common.bytes.BytesArray;\n import org.elasticsearch.common.io.stream.BytesStreamOutput;\n import org.elasticsearch.common.io.stream.InputStreamStreamInput;\n import org.elasticsearch.common.text.Text;\n import org.elasticsearch.index.Index;\n import org.elasticsearch.search.SearchShardTarget;\n import org.elasticsearch.test.ESTestCase;\n \n-import java.io.ByteArrayInputStream;\n import java.io.InputStream;\n import java.util.HashMap;\n import java.util.Map;\n@@ -87,4 +87,11 @@ public void testNullSource() throws Exception {\n assertThat(searchHit.getSourceRef(), nullValue());\n assertThat(searchHit.getSourceAsString(), nullValue());\n }\n+\n+ public void testHasSource() {\n+ InternalSearchHit searchHit = new InternalSearchHit(randomInt());\n+ assertFalse(searchHit.hasSource());\n+ searchHit.sourceRef(new BytesArray(\"{}\"));\n+ assertTrue(searchHit.hasSource());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java", "status": "modified" }, { "diff": "@@ -197,9 +197,9 @@ private static class ClientHit implements Hit {\n private final SearchHit delegate;\n private final BytesReference source;\n \n- public ClientHit(SearchHit delegate) {\n+ ClientHit(SearchHit delegate) {\n this.delegate = delegate;\n- source = delegate.hasSource() ? null : delegate.getSourceRef();\n+ source = delegate.hasSource() ? delegate.getSourceRef() : null;\n }\n \n @Override", "filename": "modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: 2.1.1(2.x)\n\n**JVM version**: 1.8.0_66-b17 \n\n**OS version**: Mac OS X 10.10.5 \n\n**Description of the problem including expected versus actual behavior**:\njava.lang.NullPointerException will be thrown when converting search response to json if search hits without no source returned(just specify search field) and compress is enable, because the **isCompressed** method of **LZFCompressor** and **DeflateCompressor** will check the parameter 'bytes' is null or not before do uncompress operation.\n Stacktrace can be referenced at log section.\n\n**Steps to reproduce**:\n1. Use SearchRequestBuilder.addField() to add search result field before execute search operation and it will not return source of document for search response hits.\n2. Execute search operation.\n3. Get the reference SearchResponse and use com.alibaba.fastjson.JSON.toJSONString() to convert it into json string, you will got NPE\n\n**Provide logs (if relevant)**:\n\njava.lang.NullPointerException: null\n at org.elasticsearch.common.compress.lzf.LZFCompressor.isCompressed(LZFCompressor.java:54) ~[elasticsearch-2.1.1.jar:2.1.1]\n at org.elasticsearch.common.compress.CompressorFactory.compressor(CompressorFactory.java:74) ~[elasticsearch-2.1.1.jar:2.1.1]\n at org.elasticsearch.common.compress.CompressorFactory.uncompressIfNeeded(CompressorFactory.java:118) ~[elasticsearch-2.1.1.jar:2.1.1]\n at org.elasticsearch.search.internal.InternalSearchHit.sourceRef(InternalSearchHit.java:200) ~[elasticsearch-2.1.1.jar:2.1.1]\n at org.elasticsearch.search.internal.InternalSearchHit.getSourceRef(InternalSearchHit.java:219) ~[elasticsearch-2.1.1.jar:2.1.1]\n at Serializer_17.write1(Unknown Source) ~[na:na]\n at Serializer_17.write(Unknown Source) ~[na:na]\n at com.alibaba.fastjson.serializer.ArraySerializer.write(ArraySerializer.java:64) ~[fastjson-1.2.8.jar:na]\n at com.alibaba.fastjson.serializer.JSONSerializer.writeWithFieldName(JSONSerializer.java:403) ~[fastjson-1.2.8.jar:na]\n at Serializer_16.write1(Unknown Source) ~[na:na]\n at Serializer_16.write(Unknown Source) ~[na:na]\n at com.alibaba.fastjson.serializer.JSONSerializer.writeWithFieldName(JSONSerializer.java:403) ~[fastjson-1.2.8.jar:na]\n at Serializer_14.write1(Unknown Source) ~[na:na]\n at Serializer_14.write(Unknown Source) ~[na:na]\n at com.alibaba.fastjson.serializer.JSONSerializer.writeWithFieldName(JSONSerializer.java:403) ~[fastjson-1.2.8.jar:na]\n at Serializer_12.write1(Unknown Source) ~[na:na]\n at Serializer_12.write(Unknown Source) ~[na:na]\n at com.alibaba.fastjson.serializer.JSONSerializer.write(JSONSerializer.java:374) ~[fastjson-1.2.8.jar:na]\n at com.alibaba.fastjson.JSON.toJSONString(JSON.java:394) ~[fastjson-1.2.8.jar:na]\n at com.alibaba.fastjson.JSON.toJSONString(JSON.java:382) ~[fastjson-1.2.8.jar:na]\n", "comments": [ { "body": "I think this has been fixed by https://github.com/elastic/elasticsearch/pull/18957\n\nCould you try your test on master and let us know if it is indeed fixed?\n", "created_at": "2016-07-06T12:55:52Z" }, { "body": "Sorry for replying late.\n\nI reviewed the changed code for #18957, but I don't think it is fixed in it.\nThe NPE is thrown by LZFCompressor or DeflateCompressor when _source is not returned and _source is not be disabled. I guess it will be fixed at LZFCompressor and DeflateCompressor from the stacktrace.\n", "created_at": "2016-07-17T00:16:48Z" }, { "body": "I also met this issue. 
Function `sourceRef()` in class InternalSearchHit always throw NullPointerException when `source` is `null`.\n\nElasticsearch version: 2.4.0\n", "created_at": "2016-11-09T10:48:07Z" } ], "number": 19279, "title": "NLP will throw when no source for search result with compression" }
{ "body": "`sourceRef` always throw `NullPointerException` when `source` is `null`\r\n\r\nCloses #19279", "number": 21431, "review_comments": [ { "body": "could you please add the curly brackets around this statement? we tend to use them although not needed around a single, for better readability.\n", "created_at": "2016-11-09T21:03:27Z" } ], "title": "Null checked for source when calling sourceRef" }
{ "commits": [ { "message": "Null checked for source when calling sourceRef (issue #19279)" }, { "message": "Update pull request #21431\n - add the curly brackets around statement\n - add unit test for this case" } ], "files": [ { "diff": "@@ -200,6 +200,10 @@ public NestedIdentity getNestedIdentity() {\n */\n @Override\n public BytesReference sourceRef() {\n+ if (this.source == null) {\n+ return null;\n+ }\n+\n try {\n this.source = CompressorFactory.uncompressIfNeeded(this.source);\n return this.source;", "filename": "core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java", "status": "modified" }, { "diff": "@@ -76,4 +76,15 @@ public void testSerializeShardTarget() throws Exception {\n assertThat(results.getAt(1).shard(), equalTo(target));\n }\n \n+ public void testNullSource() throws Exception {\n+ InternalSearchHit searchHit = new InternalSearchHit(0, \"_id\", new Text(\"_type\"), null);\n+\n+ assertThat(searchHit.source(), nullValue());\n+ assertThat(searchHit.sourceRef(), nullValue());\n+ assertThat(searchHit.sourceAsMap(), nullValue());\n+ assertThat(searchHit.sourceAsString(), nullValue());\n+ assertThat(searchHit.getSource(), nullValue());\n+ assertThat(searchHit.getSourceRef(), nullValue());\n+ assertThat(searchHit.getSourceAsString(), nullValue());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java", "status": "modified" } ] }
{ "body": "This removes the exception thrown when serializing a reindex request\r\nto a node < 5.1.0 and instead silently falls back to a single slice.\r\nThis is required to be compatible with `assertVersionSerializeable`.\r\n\r\nRelates to #20767", "comments": [ { "body": "@imotov this is what I need to backport #20767.\n", "created_at": "2016-11-05T01:47:55Z" }, { "body": "I'm going to open a new PR that does what I described in https://github.com/elastic/elasticsearch/pull/21350#discussion_r86670432\n", "created_at": "2016-11-08T16:07:09Z" } ], "number": 21350, "title": "Remove exception for sliced reindex on mixed version" }
{ "body": "This gives us a handy place to tell users that they can't make a certain request in a mixed version cluster. It fixes reindex to use that mechanism.\r\n\r\nReplaces #21350\r\nRelates to #20767", "number": 21409, "review_comments": [], "title": "Ignore IllegalArgumentException with assertVersionSerializable" }
{ "commits": [ { "message": "Ignore IAE when checking for version serialization\n\nThis allows us to throw IllegalArgumentException from serialization code\nwhen the destination node can't support the request." }, { "message": "Switch reindex with slices error to IAE\n\nIf you try to reindex with multiple slices against a node that\ndoesn't support it we throw an `IllegalArgumentException` so\n`assertVersionSerializable` is ok with it and so if this happens\nin REST it comes back as a 400 error." } ], "files": [ { "diff": "@@ -424,7 +424,7 @@ public void writeTo(StreamOutput out) throws IOException {\n out.writeVInt(slices);\n } else {\n if (slices > 1) {\n- throw new UnsupportedOperationException(\"Attempting to send sliced reindex-style request to a node that doesn't support \"\n+ throw new IllegalArgumentException(\"Attempting to send sliced reindex-style request to a node that doesn't support \"\n + \"it. Version is [\" + out.getVersion() + \"] but must be [\" + BulkByScrollTask.V_5_1_0_UNRELEASED + \"]\");\n }\n }", "filename": "modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java", "status": "modified" }, { "diff": "@@ -81,7 +81,7 @@ public void testReindexRequest() throws IOException {\n \n // Try slices with a version that doesn't support slices. That should fail.\n reindex.setSlices(between(2, 1000));\n- Exception e = expectThrows(UnsupportedOperationException.class, () -> roundTrip(Version.V_5_0_0_rc1, reindex, null));\n+ Exception e = expectThrows(IllegalArgumentException.class, () -> roundTrip(Version.V_5_0_0_rc1, reindex, null));\n assertEquals(\"Attempting to send sliced reindex-style request to a node that doesn't support it. \"\n + \"Version is [5.0.0-rc1] but must be [5.1.0]\", e.getMessage());\n \n@@ -105,7 +105,7 @@ public void testUpdateByQueryRequest() throws IOException {\n \n // Try slices with a version that doesn't support slices. That should fail.\n update.setSlices(between(2, 1000));\n- Exception e = expectThrows(UnsupportedOperationException.class, () -> roundTrip(Version.V_5_0_0_rc1, update, null));\n+ Exception e = expectThrows(IllegalArgumentException.class, () -> roundTrip(Version.V_5_0_0_rc1, update, null));\n assertEquals(\"Attempting to send sliced reindex-style request to a node that doesn't support it. \"\n + \"Version is [5.0.0-rc1] but must be [5.1.0]\", e.getMessage());\n \n@@ -126,7 +126,7 @@ public void testDeleteByQueryRequest() throws IOException {\n \n // Try slices with a version that doesn't support slices. That should fail.\n delete.setSlices(between(2, 1000));\n- Exception e = expectThrows(UnsupportedOperationException.class, () -> roundTrip(Version.V_5_0_0_rc1, delete, null));\n+ Exception e = expectThrows(IllegalArgumentException.class, () -> roundTrip(Version.V_5_0_0_rc1, delete, null));\n assertEquals(\"Attempting to send sliced reindex-style request to a node that doesn't support it. 
\"\n + \"Version is [5.0.0-rc1] but must be [5.1.0]\", e.getMessage());\n ", "filename": "modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java", "status": "modified" }, { "diff": "@@ -655,7 +655,13 @@ public static void assertVersionSerializable(Version version, Streamable streama\n if (streamable instanceof ActionRequest) {\n ((ActionRequest<?>) streamable).validate();\n }\n- BytesReference orig = serialize(version, streamable);\n+ BytesReference orig;\n+ try {\n+ orig = serialize(version, streamable);\n+ } catch (IllegalArgumentException e) {\n+ // Can't serialize with this version so skip this test.\n+ return;\n+ }\n StreamInput input = orig.streamInput();\n if (namedWriteableRegistry != null) {\n input = new NamedWriteableAwareStreamInput(input, namedWriteableRegistry);", "filename": "test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java", "status": "modified" }, { "diff": "@@ -0,0 +1,55 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.test.hamcrest;\n+\n+import org.elasticsearch.Version;\n+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;\n+import org.elasticsearch.common.io.stream.StreamInput;\n+import org.elasticsearch.common.io.stream.StreamOutput;\n+import org.elasticsearch.common.io.stream.Streamable;\n+import org.elasticsearch.test.ESTestCase;\n+\n+import java.io.IOException;\n+\n+import static java.util.Collections.emptyList;\n+import static org.elasticsearch.test.VersionUtils.randomVersion;\n+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertVersionSerializable;\n+\n+public class ElasticsearchAssertionsTests extends ESTestCase {\n+ public void testAssertVersionSerializableIsOkWithIllegalArgumentException() {\n+ Version version = randomVersion(random());\n+ NamedWriteableRegistry registry = new NamedWriteableRegistry(emptyList());\n+ Streamable testStreamable = new TestStreamable();\n+\n+ // Should catch the exception and do nothing.\n+ assertVersionSerializable(version, testStreamable, registry);\n+ }\n+\n+ public static class TestStreamable implements Streamable {\n+ @Override\n+ public void readFrom(StreamInput in) throws IOException {\n+ }\n+\n+ @Override\n+ public void writeTo(StreamOutput out) throws IOException {\n+ throw new IllegalArgumentException(\"Not supported.\");\n+ }\n+ }\n+}", "filename": "test/framework/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertionsTests.java", "status": "added" } ] }
{ "body": "Cancelling search with scroll can leave some search contexts behind. See test failure in https://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+master+multijob-unix-compatibility/os=ubuntu/160/console\n", "comments": [ { "body": "😢 \n", "created_at": "2016-10-31T15:59:51Z" }, { "body": "I think I found the reason for the failure in these tests, but we need changes from #20767 to properly fix it.\n", "created_at": "2016-11-01T01:08:32Z" }, { "body": "I'll get to it! I've got something **so** close to ready for review that I'd like to finish off first then I'll address your comments in #20767.\n", "created_at": "2016-11-01T12:43:17Z" } ], "number": 21126, "title": "Search context is not cleaned when scroll search is cancelled" }
{ "body": "Currently the task cancellation command returns as soon as the top-level parent child is marked as cancelled. This create race conditions in tests where child tasks on other nodes may continue to run for some time after the main task is cancelled. This commit fixes this situation making task cancellation command to wait until it got propagated to all nodes that have child tasks.\r\n\r\nCloses #21126", "number": 21397, "review_comments": [ { "body": "I feel like we implement this pattern enough times that we should make a helper for it at some point. No need now, but at some point.\n", "created_at": "2016-11-08T12:06:26Z" } ], "title": "Task cancellation command should wait for all child nodes to receive cancellation request before returning" }
{ "commits": [ { "message": "Task cancellation command should wait for all child nodes to receive cancellation request before returning\n\nCurrently the task cancellation command returns as soon as the top-level parent child is marked as cancelled. This create race conditions in tests where child tasks on other nodes may continue to run for some time after the main task is cancelled. This commit fixes this situation making task cancellation command to wait until it got propagated to all nodes that have child tasks.\n\nCloses #21126" } ], "files": [ { "diff": "@@ -33,6 +33,7 @@\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.util.concurrent.AtomicArray;\n import org.elasticsearch.tasks.CancellableTask;\n import org.elasticsearch.tasks.TaskId;\n import org.elasticsearch.tasks.TaskInfo;\n@@ -46,6 +47,7 @@\n import org.elasticsearch.transport.TransportService;\n \n import java.io.IOException;\n+import java.util.ArrayList;\n import java.util.List;\n import java.util.Set;\n import java.util.concurrent.atomic.AtomicInteger;\n@@ -118,12 +120,44 @@ protected synchronized void taskOperation(CancelTasksRequest request, Cancellabl\n Set<String> childNodes = taskManager.cancel(cancellableTask, request.getReason(), banLock::onTaskFinished);\n if (childNodes != null) {\n if (childNodes.isEmpty()) {\n+ // The task has no child tasks, so we can return immediately\n logger.trace(\"cancelling task {} with no children\", cancellableTask.getId());\n listener.onResponse(cancellableTask.taskInfo(clusterService.localNode().getId(), false));\n } else {\n+ // The task has some child tasks, we need to wait for until ban is set on all nodes\n logger.trace(\"cancelling task {} with children on nodes [{}]\", cancellableTask.getId(), childNodes);\n- setBanOnNodes(request.getReason(), cancellableTask, childNodes, banLock);\n- listener.onResponse(cancellableTask.taskInfo(clusterService.localNode().getId(), false));\n+ String nodeId = clusterService.localNode().getId();\n+ AtomicInteger responses = new AtomicInteger(childNodes.size());\n+ List<Exception> failures = new ArrayList<>();\n+ setBanOnNodes(request.getReason(), cancellableTask, childNodes, new ActionListener<Void>() {\n+ @Override\n+ public void onResponse(Void aVoid) {\n+ processResponse();\n+ }\n+\n+ @Override\n+ public void onFailure(Exception e) {\n+ synchronized (failures) {\n+ failures.add(e);\n+ }\n+ processResponse();\n+ }\n+\n+ private void processResponse() {\n+ banLock.onBanSet();\n+ if (responses.decrementAndGet() == 0) {\n+ if (failures.isEmpty() == false) {\n+ IllegalStateException exception = new IllegalStateException(\"failed to cancel children of the task [\" +\n+ cancellableTask.getId() + \"]\");\n+ failures.forEach(exception::addSuppressed);\n+ listener.onFailure(exception);\n+ } else {\n+ listener.onResponse(cancellableTask.taskInfo(nodeId, false));\n+ }\n+ }\n+ }\n+ });\n+\n }\n } else {\n logger.trace(\"task {} is already cancelled\", cancellableTask.getId());\n@@ -136,39 +170,40 @@ protected boolean accumulateExceptions() {\n return true;\n }\n \n- private void setBanOnNodes(String reason, CancellableTask task, Set<String> nodes, BanLock banLock) {\n+ private void setBanOnNodes(String reason, CancellableTask task, Set<String> nodes, ActionListener<Void> listener) {\n sendSetBanRequest(nodes,\n BanParentTaskRequest.createSetBanParentTaskRequest(new TaskId(clusterService.localNode().getId(), 
task.getId()), reason),\n- banLock);\n+ listener);\n }\n \n private void removeBanOnNodes(CancellableTask task, Set<String> nodes) {\n sendRemoveBanRequest(nodes,\n BanParentTaskRequest.createRemoveBanParentTaskRequest(new TaskId(clusterService.localNode().getId(), task.getId())));\n }\n \n- private void sendSetBanRequest(Set<String> nodes, BanParentTaskRequest request, BanLock banLock) {\n+ private void sendSetBanRequest(Set<String> nodes, BanParentTaskRequest request, ActionListener<Void> listener) {\n ClusterState clusterState = clusterService.state();\n for (String node : nodes) {\n DiscoveryNode discoveryNode = clusterState.getNodes().get(node);\n if (discoveryNode != null) {\n // Check if node still in the cluster\n- logger.debug(\"Sending ban for tasks with the parent [{}] to the node [{}], ban [{}]\", request.parentTaskId, node,\n+ logger.trace(\"Sending ban for tasks with the parent [{}] to the node [{}], ban [{}]\", request.parentTaskId, node,\n request.ban);\n transportService.sendRequest(discoveryNode, BAN_PARENT_ACTION_NAME, request,\n new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {\n @Override\n public void handleResponse(TransportResponse.Empty response) {\n- banLock.onBanSet();\n+ listener.onResponse(null);\n }\n \n @Override\n public void handleException(TransportException exp) {\n- banLock.onBanSet();\n+ logger.warn(\"Cannot send ban for tasks with the parent [{}] to the node [{}]\", request.parentTaskId, node);\n+ listener.onFailure(exp);\n }\n });\n } else {\n- banLock.onBanSet();\n+ listener.onResponse(null);\n logger.debug(\"Cannot send ban for tasks with the parent [{}] to the node [{}] - the node no longer in the cluster\",\n request.parentTaskId, node);\n }", "filename": "core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java", "status": "modified" }, { "diff": "@@ -176,7 +176,6 @@ public void testCancellationDuringFetchPhase() throws Exception {\n ensureSearchWasCancelled(searchResponse);\n }\n \n- @AwaitsFix(bugUrl = \"https://github.com/elastic/elasticsearch/issues/21126\")\n public void testCancellationOfScrollSearches() throws Exception {\n \n List<ScriptedBlockPlugin> plugins = initBlockFactory();\n@@ -198,7 +197,6 @@ public void testCancellationOfScrollSearches() throws Exception {\n }\n \n \n- @AwaitsFix(bugUrl = \"https://github.com/elastic/elasticsearch/issues/21126\")\n public void testCancellationOfScrollSearchesOnFollowupRequests() throws Exception {\n \n List<ScriptedBlockPlugin> plugins = initBlockFactory();", "filename": "core/src/test/java/org/elasticsearch/search/SearchCancellationIT.java", "status": "modified" } ] }
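Stripped of the transport plumbing, the change in this last record is a standard fan-out/fan-in: send a ban request to every node that may hold child tasks, count the acknowledgements down, and only then answer (or fail) the original cancel request. A compilable sketch of that accounting, using a hypothetical `Listener` interface in place of Elasticsearch's `ActionListener`:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

// Stand-in for Elasticsearch's ActionListener, kept minimal for the sketch.
interface Listener<T> {
    void onResponse(T response);
    void onFailure(Exception e);
}

// Waits for a fixed number of per-node acknowledgements before notifying the
// delegate exactly once, collecting any failures along the way.
final class GroupedAckListener implements Listener<Void> {
    private final AtomicInteger pending;
    private final List<Exception> failures = new ArrayList<>();
    private final Listener<Void> delegate;

    GroupedAckListener(int expectedResponses, Listener<Void> delegate) {
        this.pending = new AtomicInteger(expectedResponses);
        this.delegate = delegate;
    }

    @Override
    public void onResponse(Void ignored) {
        countDown();
    }

    @Override
    public void onFailure(Exception e) {
        synchronized (failures) {
            failures.add(e);
        }
        countDown();
    }

    private void countDown() {
        // Only the thread handling the final acknowledgement reaches the body below;
        // the atomic decrement provides the ordering needed to read the failures list.
        if (pending.decrementAndGet() == 0) {
            if (failures.isEmpty()) {
                delegate.onResponse(null);
            } else {
                IllegalStateException failure = new IllegalStateException("failed to cancel child tasks on some nodes");
                failures.forEach(failure::addSuppressed);
                delegate.onFailure(failure);
            }
        }
    }
}
```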