Record fields: issue (dict), pr (dict), pr_details (dict)
{ "body": "Due to a misordering of the HTTP handlers, the Netty 4 HTTP server\nmishandles Expect: 100-continue headers from clients. This commit fixes\nthis issue by ordering the handlers correctly.\n\nCloses #19834\n", "comments": [ { "body": "The Netty [docs](https://github.com/netty/netty/blob/e44c562932ed4310a6915b58b1d8dcb5e964c6f8/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectAggregator.java#L50-L51) make the issue clear here, these handlers are just in the wrong order.\n", "created_at": "2016-08-10T04:31:16Z" }, { "body": "LGTM\n", "created_at": "2016-08-10T07:43:42Z" }, { "body": "Is there a simple failing test we can make for this change?\n", "created_at": "2016-08-10T08:33:44Z" }, { "body": "@rmuir I created #19908 for that.\n", "created_at": "2016-08-10T08:55:14Z" }, { "body": "Thanks for reviewing @tlrx, and especially for adding a test in #19908.\n", "created_at": "2016-08-10T14:43:33Z" } ], "number": 19904, "title": "Fix expect 100 continue header handling" }
{ "body": "This PR adds a test to verify that the Netty 4 HTTP transport supports the `Expect: 100-continue` header.\n\nRequires #19904\n", "number": 19908, "review_comments": [ { "body": "May be `final Collection<? extends HttpRequest>`?\n", "created_at": "2016-08-10T08:57:06Z" }, { "body": "So we can may be keep here `HttpRequest`?\n", "created_at": "2016-08-10T08:57:27Z" }, { "body": "Yes, sorry, I pushed too quickly. These are leftovers.\n", "created_at": "2016-08-10T09:22:24Z" } ], "title": "[Test] Add test for Netty4 HTTP support of header 100-continue" }
{ "commits": [ { "message": "[Test] Add test for Netty4 HTTP support of header 100-continue\n\nRelated to #19904" }, { "message": "Update after David comments" } ], "files": [ { "diff": "@@ -29,6 +29,7 @@\n import io.netty.channel.socket.SocketChannel;\n import io.netty.channel.socket.nio.NioSocketChannel;\n import io.netty.handler.codec.http.DefaultFullHttpRequest;\n+import io.netty.handler.codec.http.FullHttpRequest;\n import io.netty.handler.codec.http.FullHttpResponse;\n import io.netty.handler.codec.http.HttpHeaderNames;\n import io.netty.handler.codec.http.HttpMethod;\n@@ -51,6 +52,7 @@\n import java.util.Collections;\n import java.util.List;\n import java.util.concurrent.CountDownLatch;\n+import java.util.concurrent.TimeUnit;\n \n import static io.netty.handler.codec.http.HttpHeaderNames.HOST;\n import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;\n@@ -99,6 +101,12 @@ public final Collection<FullHttpResponse> post(SocketAddress remoteAddress, Tupl\n return processRequestsWithBody(HttpMethod.POST, remoteAddress, urisAndBodies);\n }\n \n+ public final FullHttpResponse post(SocketAddress remoteAddress, FullHttpRequest httpRequest) throws InterruptedException {\n+ Collection<FullHttpResponse> responses = sendRequests(remoteAddress, Collections.singleton(httpRequest));\n+ assert responses.size() == 1 : \"expected 1 and only 1 http response\";\n+ return responses.iterator().next();\n+ }\n+\n @SafeVarargs // Safe not because it doesn't do anything with the type parameters but because it won't leak them into other methods.\n public final Collection<FullHttpResponse> put(SocketAddress remoteAddress, Tuple<String, CharSequence>... urisAndBodies)\n throws InterruptedException {\n@@ -134,7 +142,7 @@ private synchronized Collection<FullHttpResponse> sendRequests(\n for (HttpRequest request : requests) {\n channelFuture.channel().writeAndFlush(request);\n }\n- latch.await();\n+ latch.await(10, TimeUnit.SECONDS);\n \n } finally {\n if (channelFuture != null) {", "filename": "modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java", "status": "modified" }, { "diff": "@@ -19,19 +19,31 @@\n \n package org.elasticsearch.http.netty4;\n \n+import io.netty.buffer.ByteBufUtil;\n+import io.netty.buffer.Unpooled;\n+import io.netty.handler.codec.http.DefaultFullHttpRequest;\n+import io.netty.handler.codec.http.FullHttpRequest;\n+import io.netty.handler.codec.http.FullHttpResponse;\n import io.netty.handler.codec.http.HttpMethod;\n+import io.netty.handler.codec.http.HttpResponseStatus;\n+import io.netty.handler.codec.http.HttpUtil;\n+import io.netty.handler.codec.http.HttpVersion;\n import org.elasticsearch.common.Strings;\n+import org.elasticsearch.common.bytes.BytesArray;\n import org.elasticsearch.common.network.NetworkService;\n import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.transport.InetSocketTransportAddress;\n import org.elasticsearch.common.util.MockBigArrays;\n import org.elasticsearch.http.netty4.cors.Netty4CorsConfig;\n import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;\n+import org.elasticsearch.rest.BytesRestResponse;\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.junit.After;\n import org.junit.Before;\n \n+import java.nio.charset.StandardCharsets;\n import java.util.Arrays;\n import java.util.Collections;\n import java.util.HashSet;\n@@ -43,7 +55,9 @@\n import static 
org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS;\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN;\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED;\n+import static org.elasticsearch.rest.RestStatus.OK;\n import static org.hamcrest.Matchers.equalTo;\n+import static org.hamcrest.Matchers.is;\n \n /**\n * Tests for the {@link Netty4HttpServerTransport} class.\n@@ -89,4 +103,29 @@ public void testCorsConfig() {\n transport.close();\n }\n \n+ /**\n+ * Test that {@link Netty4HttpServerTransport} supports the \"Expect: 100-continue\" HTTP header\n+ */\n+ public void testExpectContinueHeader() throws Exception {\n+ try (Netty4HttpServerTransport transport = new Netty4HttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool)) {\n+ transport.httpServerAdapter((request, channel, context) ->\n+ channel.sendResponse(new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, new BytesArray(\"done\"))));\n+ transport.start();\n+ InetSocketTransportAddress remoteAddress = (InetSocketTransportAddress) randomFrom(transport.boundAddress().boundAddresses());\n+\n+ try (Netty4HttpClient client = new Netty4HttpClient()) {\n+ FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, \"/\");\n+ HttpUtil.set100ContinueExpected(request, true);\n+ HttpUtil.setContentLength(request, 10);\n+\n+ FullHttpResponse response = client.post(remoteAddress.address(), request);\n+ assertThat(response.status(), is(HttpResponseStatus.CONTINUE));\n+\n+ request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, \"/\", Unpooled.EMPTY_BUFFER);\n+ response = client.post(remoteAddress.address(), request);\n+ assertThat(response.status(), is(HttpResponseStatus.OK));\n+ assertThat(new String(ByteBufUtil.getBytes(response.content()), StandardCharsets.UTF_8), is(\"done\"));\n+ }\n+ }\n+ }\n }", "filename": "modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java", "status": "modified" } ] }
{ "body": "When using Netty4 HTTP transport type I have the following issue:\n#### Netty4\n\nStarting master using `http.type: netty4`\n\n```\nbin/elasticsearch --E http.type=netty4\n```\n\nCreating a document with a medium size JSON document using cUrl (sample document is [here](https://gist.github.com/tlrx/5df7b9fe79e076b98c731e07d877650d)):\n\n```\ncurl -v -XPOST 'localhost:9200/samples/sample/0' -d '{\n \"title\":\"My awesome book\",\n \"pages\":456,\n \"price\":27.99,\n \"timestamp\":1428582942867,\n ..... 30 other fields...\n}'\n```\n\nHere is the curl output:\n\n```\n\nNote: Unnecessary use of -X or --request, POST is already inferred.\n* Trying 127.0.0.1...\n* Connected to localhost (127.0.0.1) port 9200 (#0)\n> POST /samples/sample/0 HTTP/1.1\n> Host: localhost:9200\n> User-Agent: curl/7.47.0\n> Accept: */*\n> Content-Length: 1681\n> Content-Type: application/x-www-form-urlencoded\n> Expect: 100-continue\n> \n* Empty reply from server\n* Connection #0 to host localhost left intact\ncurl: (52) Empty reply from server\n```\n\nAnd the elasticsearch logs:\n\n```\n\n[2016-08-05 17:46:13,354][WARN ][http.netty4 ] [UqG1hcq] caught exception while handling client http traffic, closing connection [id: 0x0ca9a8ca, L:/127.0.0.1:9200 - R:/127.0.0.1:43208]\njava.lang.UnsupportedOperationException: unsupported message type: DefaultFullHttpResponse (expected: ByteBuf, FileRegion)\n at io.netty.channel.nio.AbstractNioByteChannel.filterOutboundMessage(AbstractNioByteChannel.java:260)\n at io.netty.channel.AbstractChannel$AbstractUnsafe.write(AbstractChannel.java:799)\n at io.netty.channel.DefaultChannelPipeline$HeadContext.write(DefaultChannelPipeline.java:1291)\n at io.netty.channel.AbstractChannelHandlerContext.invokeWrite0(AbstractChannelHandlerContext.java:748)\n at io.netty.channel.AbstractChannelHandlerContext.invokeWriteAndFlush(AbstractChannelHandlerContext.java:811)\n at io.netty.channel.AbstractChannelHandlerContext.write(AbstractChannelHandlerContext.java:824)\n at io.netty.channel.AbstractChannelHandlerContext.writeAndFlush(AbstractChannelHandlerContext.java:804)\n at io.netty.channel.AbstractChannelHandlerContext.writeAndFlush(AbstractChannelHandlerContext.java:841)\n at io.netty.handler.codec.MessageAggregator.decode(MessageAggregator.java:222)\n at io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:88)\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:372)\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:358)\n at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:350)\n at io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:372)\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:358)\n at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:350)\n at io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:293)\n at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:267)\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:372)\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:358)\n at 
io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:350)\n at io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:86)\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:372)\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:358)\n at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:350)\n at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:372)\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:358)\n at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926)\n at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:129)\n at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:571)\n at io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:474)\n at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:428)\n at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:398)\n at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:877)\n at java.lang.Thread.run(Thread.java:745)\n```\n\nUpdate: as a workaround for users, forcing the `Expect` header to empty makes the request succeed:\n\n`curl -v -H \"Expect:\" -XPOST 'localhost:9200/samples/sample/0' -d '...'`\n#### Netty3\n\nIt works fine with `http.type: netty3`\n\n```\nbin/elasticsearch --E http.type=netty3\n```\n\nCreating the document:\n\n```\ncurl -v -XPOST 'localhost:9200/samples/sample/0' -d '{\n \"title\":\"My awesome book\",\n \"pages\":456,\n \"price\":27.99,\n \"timestamp\":1428582942867,\n ..... 30 other fields...\n}'\n```\n\nHere is the curl output:\n\n```\n\nNote: Unnecessary use of -X or --request, POST is already inferred.\n* Trying 127.0.0.1...\n* Connected to localhost (127.0.0.1) port 9200 (#0)\n> POST /samples/sample/0 HTTP/1.1\n> Host: localhost:9200\n> User-Agent: curl/7.47.0\n> Accept: */*\n> Content-Length: 1681\n> Content-Type: application/x-www-form-urlencoded\n> Expect: 100-continue\n> \n< HTTP/1.1 100 Continue\n* We are completely uploaded and fine\n< HTTP/1.1 201 Created\n< Location: /samples/sample/0\n< Content-Type: application/json; charset=UTF-8\n< Content-Length: 142\n< \n* Connection #0 to host localhost left intact\n{\"_index\":\"samples\",\"_type\":\"sample\",\"_id\":\"0\",\"_version\":1,\"result\":\"created\",\"_shards\":{\"total\":2,\"successful\":1,\"failed\":0},\"created\":true}% \n\n```\n", "comments": [ { "body": "For what it worth, it looks like the `Netty4HttpServerTransport` correctly initializes the `Channel` using a `HttpObjectAggregator` which seems to be in charge of handling the `Expect: 100-continue` header with the `io.netty.handler.codec.http.HttpObjectAggregator.newContinueResponse()` method\n", "created_at": "2016-08-05T16:02:57Z" }, { "body": "It seems to be a Netty issue and I have a potential fix. I'm trying to create a test to reproduce this and verify my fix. If successful I'll create a pull request on Netty project.\n", "created_at": "2016-08-09T12:48:27Z" }, { "body": "I'm not convinced this is a Netty issue, I think the handlers are just ordered incorrectly. 
I opened #19904.\n", "created_at": "2016-08-10T04:41:29Z" }, { "body": "@jasontedor I agree - that's also my conclusion after some more digging. I was about to submit the same change as you.\n", "created_at": "2016-08-10T07:43:10Z" } ], "number": 19834, "title": "Netty4 HTTP does not support \"Expect: 100-continue\" header" }
{ "body": "Due to a misordering of the HTTP handlers, the Netty 4 HTTP server\nmishandles Expect: 100-continue headers from clients. This commit fixes\nthis issue by ordering the handlers correctly.\n\nCloses #19834\n", "number": 19904, "review_comments": [], "title": "Fix expect 100 continue header handling" }
{ "commits": [ { "message": "Fix expect 100 continue header handling\n\nDue to a misordering of the HTTP handlers, the Netty 4 HTTP server\nmishandles Expect: 100-continue headers from clients. This commit fixes\nthis issue by ordering the handlers correctly." } ], "files": [ { "diff": "@@ -548,12 +548,12 @@ protected void initChannel(Channel ch) throws Exception {\n decoder.setCumulator(ByteToMessageDecoder.COMPOSITE_CUMULATOR);\n ch.pipeline().addLast(\"decoder\", decoder);\n ch.pipeline().addLast(\"decoder_compress\", new HttpContentDecompressor());\n+ ch.pipeline().addLast(\"encoder\", new HttpResponseEncoder());\n final HttpObjectAggregator aggregator = new HttpObjectAggregator(Math.toIntExact(transport.maxContentLength.bytes()));\n if (transport.maxCompositeBufferComponents != -1) {\n aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents);\n }\n ch.pipeline().addLast(\"aggregator\", aggregator);\n- ch.pipeline().addLast(\"encoder\", new HttpResponseEncoder());\n if (transport.compression) {\n ch.pipeline().addLast(\"encoder_compress\", new HttpContentCompressor(transport.compressionLevel));\n }", "filename": "modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java", "status": "modified" } ] }
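The fix above boils down to pipeline ordering: the `HttpResponseEncoder` must be registered before the `HttpObjectAggregator`, because the aggregator answers `Expect: 100-continue` by writing a `100 Continue` response from its own position in the pipeline, and that outbound write only traverses handlers added earlier than the aggregator. A minimal, self-contained sketch of the corrected ordering follows; it is not the Elasticsearch transport itself, and the class name, `maxContentLength` parameter, and bare `HttpRequestDecoder` are illustrative stand-ins for the transport's configured handlers.

```java
import io.netty.channel.ChannelInitializer;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http.HttpRequestDecoder;
import io.netty.handler.codec.http.HttpResponseEncoder;

/**
 * Sketch of the corrected handler order. If the encoder were added after the
 * aggregator (the old order), the aggregator's 100 Continue response would
 * reach the channel head unencoded and fail with
 * "unsupported message type: DefaultFullHttpResponse".
 */
public class ExpectContinueInitializer extends ChannelInitializer<SocketChannel> {

    private final int maxContentLength; // placeholder for the configured limit

    public ExpectContinueInitializer(int maxContentLength) {
        this.maxContentLength = maxContentLength;
    }

    @Override
    protected void initChannel(SocketChannel ch) {
        ch.pipeline().addLast("decoder", new HttpRequestDecoder());
        // encoder first, so the aggregator's outbound 100 Continue passes through it
        ch.pipeline().addLast("encoder", new HttpResponseEncoder());
        ch.pipeline().addLast("aggregator", new HttpObjectAggregator(maxContentLength));
        // application handlers would be added here
    }
}
```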
{ "body": "With alpha5-SNAPSHOT, here is an extract of the generated `pom.xml`. See https://oss.sonatype.org/content/repositories/snapshots/org/elasticsearch/client/rest/5.0.0-alpha5-SNAPSHOT/rest-5.0.0-alpha5-20160805.100826-168.pom\n\n``` xml\n<dependency>\n <groupId>org.apache.httpcomponents</groupId>\n <artifactId>httpcore</artifactId>\n <version>4.4.5</version>\n <scope>runtime</scope>\n</dependency>\n<dependency>\n <groupId>org.apache.httpcomponents</groupId>\n <artifactId>httpasyncclient</artifactId>\n <version>4.1.2</version>\n <scope>runtime</scope>\n</dependency>\n```\n\nWhich basically means that you can't use OOTB the apache classes:\n\n``` java\nRestClient restClient = RestClient.builder(\n new HttpHost(\"http\", \"localhost\", 9200),\n new HttpHost(\"http\", \"localhost\", 9201)).build();\n```\n\nThis won't compile.\n\nNote that it was working well with alpha4. I did not try a \"final\" released version of alpha5 though.\n", "comments": [ { "body": "The workaround for now is to add in the project which uses a REST client:\n\n``` xml\n<dependency>\n <groupId>org.apache.httpcomponents</groupId>\n <artifactId>httpcore</artifactId>\n <version>4.4.5</version>\n <scope>compile</scope>\n</dependency>\n<dependency>\n <groupId>org.apache.httpcomponents</groupId>\n <artifactId>httpasyncclient</artifactId>\n <version>4.1.2</version>\n <scope>compile</scope>\n</dependency>\n<dependency>\n <groupId>org.apache.httpcomponents</groupId>\n <artifactId>httpclient</artifactId>\n <version>4.1.2</version>\n <scope>compile</scope>\n</dependency>\n```\n", "created_at": "2016-08-05T16:25:26Z" }, { "body": "Hmm - I think it'd be much more friendly to call them compile dependencies. We mostly want them to be. If the user really wants them to be runtime dependencies they can override the scope in their pom.\n", "created_at": "2016-08-05T16:39:46Z" }, { "body": "I have no idea where this runtime thing comes from, they are declared compile in our build.gradle. https://github.com/elastic/elasticsearch/blob/master/client/rest/build.gradle#L34\n", "created_at": "2016-08-05T16:42:00Z" }, { "body": "I expect it is a pom generation thing.\n", "created_at": "2016-08-05T17:13:48Z" }, { "body": "@rjernst do you know where this may come from? let me know if I can help.\n", "created_at": "2016-08-05T17:15:26Z" }, { "body": "This is an issue with the `maven-publish` plugin in gradle, which the unified release uses. In alpha5, we generated the poms for the client jars using the new mechanism, while the old jar poms were still built using the `maven` plugin.\n\nI opened a PR to fix: #19876\n", "created_at": "2016-08-08T18:57:22Z" } ], "number": 19835, "title": "REST client pom.xml has runtime dependencies" }
{ "body": "This change works around a known issue with using the maven-publish\ngradle plugin. All deps are marked in the generated pom as runtime. With\nthis change, they are set back to compile time. This also simplifies the\ntransitive dependencies exclusion to work the same as how it was fixed in\ngradle 2.14 (wildcard exclusions).\n\ncloses #19835\n", "number": 19876, "review_comments": [], "title": "Build: Fix compile time deps in poms and simplify transitive exclusions" }
{ "commits": [ { "message": "Build: Fix compile time deps in poms and simplify transitive exclusions\n\nThis change works around a known issue with using the maven-publish\ngralde plugin. All deps are marked in the generated pom as runtime. With\nthis change, they are set back to compile time. This also simplified the\ntransitive dependencies exclusion to work the same as how it was fixed in\ngradle 2.14 (wildcard exclusions).\n\ncloses #19835" } ], "files": [ { "diff": "@@ -270,7 +270,7 @@ class BuildPlugin implements Plugin<Project> {\n \n // add exclusions to the pom directly, for each of the transitive deps of this project's deps\n project.modifyPom { MavenPom pom ->\n- pom.withXml(removeTransitiveDependencies(project))\n+ pom.withXml(fixupDependencies(project))\n }\n }\n \n@@ -299,9 +299,16 @@ class BuildPlugin implements Plugin<Project> {\n }\n }\n \n- /** Returns a closure which can be used with a MavenPom for removing transitive dependencies. */\n- private static Closure removeTransitiveDependencies(Project project) {\n- // TODO: remove this when enforcing gradle 2.13+, it now properly handles exclusions\n+ /**\n+ * Returns a closure which can be used with a MavenPom for fixing problems with gradle generated poms.\n+ *\n+ * <ul>\n+ * <li>Remove transitive dependencies (using wildcard exclusions, fixed in gradle 2.14)</li>\n+ * <li>Set compile time deps back to compile from runtime (known issue with maven-publish plugin)\n+ * </ul>\n+ */\n+ private static Closure fixupDependencies(Project project) {\n+ // TODO: remove this when enforcing gradle 2.14+, it now properly handles exclusions\n return { XmlProvider xml ->\n // first find if we have dependencies at all, and grab the node\n NodeList depsNodes = xml.asNode().get('dependencies')\n@@ -315,6 +322,15 @@ class BuildPlugin implements Plugin<Project> {\n String artifactId = depNode.get('artifactId').get(0).text()\n String version = depNode.get('version').get(0).text()\n \n+ // fix deps incorrectly marked as runtime back to compile time deps\n+ // see https://discuss.gradle.org/t/maven-publish-plugin-generated-pom-making-dependency-scope-runtime/7494/4\n+ boolean isCompileDep = project.configurations.compile.allDependencies.find { dep ->\n+ dep.name == depNode.artifactId.text()\n+ }\n+ if (depNode.scope.text() == 'runtime' && isCompileDep) {\n+ depNode.scope*.value = 'compile'\n+ }\n+\n // collect the transitive deps now that we know what this dependency is\n String depConfig = transitiveDepConfigName(groupId, artifactId, version)\n Configuration configuration = project.configurations.findByName(depConfig)\n@@ -327,17 +343,10 @@ class BuildPlugin implements Plugin<Project> {\n continue\n }\n \n- // we now know we have something to exclude, so add the exclusion elements\n- Node exclusions = depNode.appendNode('exclusions')\n- for (ResolvedArtifact transitiveArtifact : artifacts) {\n- ModuleVersionIdentifier transitiveDep = transitiveArtifact.moduleVersion.id\n- if (transitiveDep.group == groupId && transitiveDep.name == artifactId) {\n- continue; // don't exclude the dependency itself!\n- }\n- Node exclusion = exclusions.appendNode('exclusion')\n- exclusion.appendNode('groupId', transitiveDep.group)\n- exclusion.appendNode('artifactId', transitiveDep.name)\n- }\n+ // we now know we have something to exclude, so add a wildcard exclusion element\n+ Node exclusion = depNode.appendNode('exclusions').appendNode('exclusion')\n+ exclusion.appendNode('groupId', '*')\n+ exclusion.appendNode('artifactId', '*')\n }\n }\n }\n@@ -349,7 +358,7 @@ 
class BuildPlugin implements Plugin<Project> {\n publications {\n all { MavenPublication publication -> // we only deal with maven\n // add exclusions to the pom directly, for each of the transitive deps of this project's deps\n- publication.pom.withXml(removeTransitiveDependencies(project))\n+ publication.pom.withXml(fixupDependencies(project))\n }\n }\n }", "filename": "buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy", "status": "modified" } ] }
{ "body": "As part of our improved CORS handling (https://github.com/elastic/elasticsearch/pull/16092), we serialize the `Access-Control-Allow-Headers` and `Access-Control-Allow-Methods` response headers using Netty, which, if given a set of values, produces a response header that looks like:\n\n```\nAccess-Control-Allow-Headers: Content-Type\nAccess-Control-Allow-Headers: X-Requested-With\n```\n\ninstead of a comma-separated list in a single header value \n`Access-Control-Allow-Headers: Content-Type,X-Requested-With`\n\nSeparating each header value out individually _should_ be fine according to the RFC: http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2, although the RFC does leave some ambiguity if such a separation does need to be supported.\n\nIt turns out Chrome is fine with this but IE is not.\n", "comments": [ { "body": "Closed by 4566378cd58415f84bde2252b6b95f6b24e96dff (2.4) and 0fd150d5d9743250b27062c391ff77d8b726c51a (2.3.6)\n", "created_at": "2016-08-10T20:31:31Z" } ], "number": 19841, "title": "CORS multi-value response headers don't work in IE" }
{ "body": "Despite the RFC permitting multi-value response headers to appear as individual header fields instead of a single header field with a comma delimitted value, Internet Explorer does not deal well with multi-value response headers and only takes the last one it has found in the response. For example, if the response header contains:\n\n```\nAccess-Control-Allow-Headers: Content-Type\nAccess-Control-Allow-Headers: X-Requested-With\n```\n\nInternet Explorer only processes the last one it read and considers the value of `Access-Control-Allow-Headers` to be `X-Requested-With` instead of both `Content-Type` and `X-Requested-With`. This is an artifact of how Netty3 serializes a collection of values for a response header.\n\nThis change ensures that multi-value CORS response headers are serialized as a single header field with a comma delimited value, which all browsers support. This also brings the implementation in 2.4 in conformity with how Netty4 handles multi-value headers, which is the default transport implementation for 5.x. So now, the CORS response will include all allowed headers (as specified in the `http.cors.allow-headers` setting) in a single response header `Access-Control-Allow-Headers` as a comma delimited list:\n\n```\nAccess-Control-Allow-Headers: Content-Type,X-Requested-With\n```\n\nCloses #19841\n", "number": 19872, "review_comments": [ { "body": "This is really two tests, one for uni-valued, and one for multi-valued. Can we just break it into two tests?\n", "created_at": "2016-08-10T17:18:34Z" } ], "title": "Single comma-delimited response header for multiple values" }
{ "commits": [ { "message": "Single comma-delimited response header for multiple values\n\nDespite the RFC permitting multi-value response headers\nto appear as individual header fields instead of a\nsingle header field with a comma delimitted value,\nInternet Explorer does not deal well with this and hence,\nthis commit ensures that multi-value CORS response\nheaders are serialized as a single header field with\na comma delimitted value. This also brings the\nimplementation in conformity with how Netty4 handles\nmulti-value headers, which is the default transport\nimplementation for 5.x.\n\nCloses #19841" }, { "message": "Split tests into two" } ], "files": [ { "diff": "@@ -228,16 +228,18 @@ private static boolean isPreflightRequest(final HttpRequest request) {\n headers.contains(HttpHeaders.Names.ACCESS_CONTROL_REQUEST_METHOD);\n }\n \n- private void setAllowMethods(final HttpResponse response) {\n+ // package private for testing\n+ void setAllowMethods(final HttpResponse response) {\n Set<String> strMethods = new HashSet<>();\n for (HttpMethod method : config.allowedRequestMethods()) {\n strMethods.add(method.getName().trim());\n }\n- response.headers().set(ACCESS_CONTROL_ALLOW_METHODS, strMethods);\n+ response.headers().set(ACCESS_CONTROL_ALLOW_METHODS, Strings.collectionToCommaDelimitedString(strMethods));\n }\n \n- private void setAllowHeaders(final HttpResponse response) {\n- response.headers().set(ACCESS_CONTROL_ALLOW_HEADERS, config.allowedRequestHeaders());\n+ // package private for testing\n+ void setAllowHeaders(final HttpResponse response) {\n+ response.headers().set(ACCESS_CONTROL_ALLOW_HEADERS, Strings.collectionToCommaDelimitedString(config.allowedRequestHeaders()));\n }\n \n private void setMaxAge(final HttpResponse response) {", "filename": "core/src/main/java/org/elasticsearch/http/netty/cors/CorsHandler.java", "status": "modified" }, { "diff": "@@ -0,0 +1,75 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.http.netty.cors;\n+\n+import org.elasticsearch.common.Strings;\n+import org.elasticsearch.test.ESTestCase;\n+import org.jboss.netty.handler.codec.http.DefaultHttpResponse;\n+import org.jboss.netty.handler.codec.http.HttpMethod;\n+import org.jboss.netty.handler.codec.http.HttpResponse;\n+import org.jboss.netty.handler.codec.http.HttpVersion;\n+import org.junit.Test;\n+\n+import java.util.Set;\n+\n+import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_HEADERS;\n+import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_METHODS;\n+import static org.jboss.netty.handler.codec.http.HttpResponseStatus.OK;\n+\n+/**\n+ * Tests for {@link CorsHandler}\n+ */\n+public class CorsHandlerTests extends ESTestCase {\n+\n+ @Test\n+ public void testSingleValueResponseHeaders() {\n+ CorsConfig corsConfig = new CorsConfigBuilder()\n+ .allowedRequestHeaders(\"content-type\")\n+ .allowedRequestMethods(HttpMethod.GET)\n+ .build();\n+ CorsHandler corsHandler = new CorsHandler(corsConfig);\n+ HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, OK);\n+ corsHandler.setAllowMethods(response);\n+ corsHandler.setAllowHeaders(response);\n+ assertEquals(\"content-type\", response.headers().get(ACCESS_CONTROL_ALLOW_HEADERS));\n+ assertEquals(\"GET\", response.headers().get(ACCESS_CONTROL_ALLOW_METHODS));\n+ }\n+\n+ @Test\n+ public void testMultiValueResponseHeaders() {\n+ CorsConfig corsConfig = new CorsConfigBuilder()\n+ .allowedRequestHeaders(\"content-type,x-requested-with,accept\")\n+ .allowedRequestMethods(HttpMethod.GET, HttpMethod.POST)\n+ .build();\n+ CorsHandler corsHandler = new CorsHandler(corsConfig);\n+ HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, OK);\n+ corsHandler.setAllowMethods(response);\n+ corsHandler.setAllowHeaders(response);\n+ Set<String> responseHeadersSet = Strings.commaDelimitedListToSet(response.headers().get(ACCESS_CONTROL_ALLOW_HEADERS));\n+ assertEquals(3, responseHeadersSet.size());\n+ assertTrue(responseHeadersSet.contains(\"content-type\"));\n+ assertTrue(responseHeadersSet.contains(\"x-requested-with\"));\n+ assertTrue(responseHeadersSet.contains(\"accept\"));\n+ Set<String> responseMethodsSet = Strings.commaDelimitedListToSet(response.headers().get(ACCESS_CONTROL_ALLOW_METHODS));\n+ assertEquals(2, responseMethodsSet.size());\n+ assertTrue(responseMethodsSet.contains(\"GET\"));\n+ assertTrue(responseMethodsSet.contains(\"POST\"));\n+ }\n+}", "filename": "core/src/test/java/org/elasticsearch/http/netty/cors/CorsHandlerTests.java", "status": "added" } ] }
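The change above replaces the collection-valued header set with a single comma-joined string. Below is a minimal sketch of that serialization choice, using plain `String.join` rather than Elasticsearch's `Strings.collectionToCommaDelimitedString`; the class and variable names are made up for illustration.

```java
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

/**
 * Joining the allowed CORS values into one comma-delimited header value,
 * rather than emitting one header field per value (which Internet Explorer
 * truncates to the last field it sees).
 */
public class CorsHeaderJoinExample {
    public static void main(String[] args) {
        Set<String> allowedHeaders = new LinkedHashSet<>(
                Arrays.asList("Content-Type", "X-Requested-With"));

        // Produces: Access-Control-Allow-Headers: Content-Type,X-Requested-With
        String headerValue = String.join(",", allowedHeaders);
        System.out.println("Access-Control-Allow-Headers: " + headerValue);
    }
}
```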
{ "body": "Today you can create a blank field name, eg:\n\n```\nPUT t/t/1\n{\n \"\": 5\n}\n```\n\nOr\n\n```\nPUT t/t/1\n{\n \"\": {\n \"foo\": 5\n }\n}\n```\n\nBlank field names should be illegal as they are not accessible via paths\n\nRelated https://github.com/elastic/kibana/issues/7617\n", "comments": [], "number": 19251, "title": "Blank field names should be illegal" }
{ "body": "Add validation to Mapper constructor.\nAdd test cases\nAdd rest-api-test\n\nAdd only Mapper without Legacy*Mapper\nAnd we should discuss how to handle already exist indices that has blank field name when it is restored\n\nCloses #19251\n", "number": 19860, "review_comments": [ { "body": "I would distinguish the null check from the empty check, as a null simpleName means that we have a bug while an empty simpleName means the issue is on the user end. So maybe something like this:\n\n```\nObjects.requireNonNull(simpleName);\nif (simpleName.isEmpty()) {\n throw new IllegalArgumentException(\"name cannot be an empty string\");\n}\n```\n", "created_at": "2016-08-11T12:00:08Z" }, { "body": "let's not add leniency to this method, why did you need to add it?\n", "created_at": "2016-08-24T12:42:25Z" }, { "body": "why do we need to check that `context.indexCreatedVersion()` is not null? the version should always be available\n", "created_at": "2016-08-24T12:43:19Z" }, { "body": "Because BuilderContext.indexCreatedVersion() has Nullable annotation...\nShould we remove Nullable annotation in Mapper.BuilderContext?\n", "created_at": "2016-08-25T03:45:10Z" }, { "body": "Yes I think we should (but this belongs to a different change)\n", "created_at": "2016-08-25T06:03:21Z" }, { "body": "this should be alpha6 now that alpha5 is out\n", "created_at": "2016-08-26T08:32:52Z" } ], "title": "Validate blank field name" }
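The review comments above settle on separating the null check (a bug in the caller) from the empty check (a user error). A minimal standalone sketch of that rule is shown below; the class and method are hypothetical, and the real change additionally gates the empty-name rejection on the index-created version (5.0.0-alpha6 and later) so that older indices can still be restored.

```java
import java.util.Objects;

/**
 * Hypothetical helper illustrating the agreed-upon validation: null means a
 * programming error, while an empty string means a bad user-supplied mapping
 * such as PUT t/t/1 { "": 5 }.
 */
public class FieldNameValidator {

    static String validateFieldName(String simpleName) {
        Objects.requireNonNull(simpleName, "field name must not be null"); // internal bug if hit
        if (simpleName.isEmpty()) {
            throw new IllegalArgumentException("name cannot be empty string");
        }
        return simpleName;
    }

    public static void main(String[] args) {
        System.out.println(validateFieldName("title")); // ok
        try {
            validateFieldName("");
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
```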
{ "commits": [ { "message": "Validate blank field name\n\nadd validation and validate only 5.0+\nAdd tests before 5.0\n\nCloses #19251" }, { "message": "Validate blank field name\n\nValidate only 5.0 alpha 6+ index only\n\nCloses #19251" } ], "files": [ { "diff": "@@ -246,6 +246,11 @@ protected FieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldT\n super(simpleName);\n assert indexSettings != null;\n this.indexCreatedVersion = Version.indexCreated(indexSettings);\n+ if (indexCreatedVersion.onOrAfter(Version.V_5_0_0_alpha6)) {\n+ if (simpleName.isEmpty()) {\n+ throw new IllegalArgumentException(\"name cannot be empty string\");\n+ }\n+ }\n fieldType.freeze();\n this.fieldType = fieldType;\n defaultFieldType.freeze();", "filename": "core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java", "status": "modified" }, { "diff": "@@ -22,15 +22,14 @@\n import org.elasticsearch.Version;\n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.ParseFieldMatcher;\n-import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.ToXContent;\n import org.elasticsearch.index.analysis.AnalysisService;\n-import org.elasticsearch.index.query.QueryParseContext;\n import org.elasticsearch.index.query.QueryShardContext;\n import org.elasticsearch.index.similarity.SimilarityProvider;\n \n import java.util.Map;\n+import java.util.Objects;\n import java.util.function.Function;\n \n public abstract class Mapper implements ToXContent, Iterable<Mapper> {\n@@ -172,6 +171,7 @@ static class MultiFieldParserContext extends ParserContext {\n private final String simpleName;\n \n public Mapper(String simpleName) {\n+ Objects.requireNonNull(simpleName);\n this.simpleName = simpleName;\n }\n ", "filename": "core/src/main/java/org/elasticsearch/index/mapper/Mapper.java", "status": "modified" }, { "diff": "@@ -24,6 +24,7 @@\n import org.apache.lucene.search.TermQuery;\n import org.apache.lucene.util.BytesRef;\n import org.elasticsearch.ElasticsearchParseException;\n+import org.elasticsearch.Version;\n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.collect.CopyOnWriteHashMap;\n import org.elasticsearch.common.settings.Settings;\n@@ -161,7 +162,7 @@ public Y build(BuilderContext context) {\n \n protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic,\n Boolean includeInAll, Map<String, Mapper> mappers, @Nullable Settings settings) {\n- return new ObjectMapper(name, fullPath, enabled, nested, dynamic, includeInAll, mappers);\n+ return new ObjectMapper(name, fullPath, enabled, nested, dynamic, includeInAll, mappers, settings);\n }\n }\n \n@@ -320,8 +321,15 @@ protected static void parseProperties(ObjectMapper.Builder objBuilder, Map<Strin\n private volatile CopyOnWriteHashMap<String, Mapper> mappers;\n \n ObjectMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic,\n- Boolean includeInAll, Map<String, Mapper> mappers) {\n+ Boolean includeInAll, Map<String, Mapper> mappers, Settings settings) {\n super(name);\n+ assert settings != null;\n+ Version indexCreatedVersion = Version.indexCreated(settings);\n+ if (indexCreatedVersion.onOrAfter(Version.V_5_0_0_alpha6)) {\n+ if (name.isEmpty()) {\n+ throw new IllegalArgumentException(\"name cannot be empty string\");\n+ }\n+ }\n this.fullPath = fullPath;\n this.enabled = enabled;\n this.nested = nested;", "filename": 
"core/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java", "status": "modified" }, { "diff": "@@ -84,7 +84,7 @@ protected ObjectMapper createMapper(String name, String fullPath, boolean enable\n return new RootObjectMapper(name, enabled, dynamic, includeInAll, mappers,\n dynamicDateTimeFormatters,\n dynamicTemplates,\n- dateDetection, numericDetection);\n+ dateDetection, numericDetection, settings);\n }\n }\n \n@@ -170,8 +170,8 @@ protected boolean processField(RootObjectMapper.Builder builder, String fieldNam\n \n RootObjectMapper(String name, boolean enabled, Dynamic dynamic, Boolean includeInAll, Map<String, Mapper> mappers,\n Explicit<FormatDateTimeFormatter[]> dynamicDateTimeFormatters, Explicit<DynamicTemplate[]> dynamicTemplates,\n- Explicit<Boolean> dateDetection, Explicit<Boolean> numericDetection) {\n- super(name, name, enabled, Nested.NO, dynamic, includeInAll, mappers);\n+ Explicit<Boolean> dateDetection, Explicit<Boolean> numericDetection, Settings settings) {\n+ super(name, name, enabled, Nested.NO, dynamic, includeInAll, mappers, settings);\n this.dynamicTemplates = dynamicTemplates;\n this.dynamicDateTimeFormatters = dynamicDateTimeFormatters;\n this.dateDetection = dateDetection;", "filename": "core/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java", "status": "modified" }, { "diff": "@@ -96,9 +96,11 @@\n import org.elasticsearch.index.translog.TranslogConfig;\n import org.elasticsearch.indices.IndicesModule;\n import org.elasticsearch.indices.mapper.MapperRegistry;\n+import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.test.DummyShardLock;\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.test.IndexSettingsModule;\n+import org.elasticsearch.test.InternalSettingsPlugin;\n import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.hamcrest.MatcherAssert;\n@@ -114,6 +116,7 @@\n import java.util.ArrayList;\n import java.util.Arrays;\n import java.util.Base64;\n+import java.util.Collection;\n import java.util.Collections;\n import java.util.List;\n import java.util.Locale;\n@@ -1808,7 +1811,8 @@ public void testSkipTranslogReplay() throws IOException {\n }\n \n private Mapping dynamicUpdate() {\n- BuilderContext context = new BuilderContext(Settings.EMPTY, new ContentPath());\n+ BuilderContext context = new BuilderContext(\n+ Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(), new ContentPath());\n final RootObjectMapper root = new RootObjectMapper.Builder(\"some_type\").build(context);\n return new Mapping(Version.CURRENT, root, new MetadataFieldMapper[0], emptyMap());\n }", "filename": "core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java", "status": "modified" }, { "diff": "@@ -20,29 +20,43 @@\n package org.elasticsearch.index.mapper;\n \n import org.apache.lucene.util.BytesRef;\n+import org.elasticsearch.Version;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.bytes.BytesArray;\n import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.compress.CompressedXContent;\n import org.elasticsearch.common.compress.CompressorFactory;\n import org.elasticsearch.common.io.stream.BytesStreamOutput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n+import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.index.mapper.BinaryFieldMapper;\n import 
org.elasticsearch.index.mapper.DocumentMapper;\n import org.elasticsearch.index.mapper.FieldMapper;\n import org.elasticsearch.index.mapper.ParsedDocument;\n+import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.test.ESSingleNodeTestCase;\n+import org.elasticsearch.test.InternalSettingsPlugin;\n+import org.elasticsearch.test.VersionUtils;\n \n import java.io.IOException;\n import java.util.Arrays;\n+import java.util.Collection;\n \n+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;\n+import static org.hamcrest.Matchers.containsString;\n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.instanceOf;\n \n /**\n */\n public class BinaryFieldMapperTests extends ESSingleNodeTestCase {\n \n+ @Override\n+ protected Collection<Class<? extends Plugin>> getPlugins() {\n+ return pluginList(InternalSettingsPlugin.class);\n+ }\n+\n public void testDefaultMapping() throws Exception {\n String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n .startObject(\"properties\")\n@@ -92,4 +106,23 @@ public void testStoredValue() throws IOException {\n assertEquals(new BytesArray(value), originalValue);\n }\n }\n+\n+ public void testEmptyName() throws IOException {\n+ // after 5.x\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\").startObject(\"\").field(\"type\", \"binary\").endObject().endObject()\n+ .endObject().endObject().string();\n+\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> createIndex(\"test\").mapperService().documentMapperParser().parse(\"type\", new CompressedXContent(mapping))\n+ );\n+ assertThat(e.getMessage(), containsString(\"name cannot be empty string\"));\n+\n+ // before 5.x\n+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);\n+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();\n+\n+ DocumentMapper defaultMapper = createIndex(\"test_old\", oldIndexSettings).mapperService().documentMapperParser().parse(\"type\", new CompressedXContent(mapping));\n+ assertEquals(mapping, defaultMapper.mappingSource().string());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java", "status": "modified" }, { "diff": "@@ -47,11 +47,15 @@\n import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.test.ESSingleNodeTestCase;\n import org.elasticsearch.test.InternalSettingsPlugin;\n+import org.elasticsearch.test.VersionUtils;\n import org.junit.Before;\n \n import java.io.IOException;\n import java.util.Collection;\n \n+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;\n+import static org.hamcrest.Matchers.containsString;\n+\n public class BooleanFieldMapperTests extends ESSingleNodeTestCase {\n \n IndexService indexService;\n@@ -214,4 +218,25 @@ public void testBwCompatDocValues() throws Exception {\n assertEquals(DocValuesType.NONE, LegacyStringMappingTests.docValuesType(doc, \"bool2\"));\n assertEquals(DocValuesType.SORTED_NUMERIC, LegacyStringMappingTests.docValuesType(doc, \"bool3\"));\n }\n+\n+ public void testEmptyName() throws IOException {\n+ // after 5.x\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\").startObject(\"\").field(\"type\", \"boolean\").endObject().endObject()\n+ .endObject().endObject().string();\n+\n+ IllegalArgumentException 
e = expectThrows(IllegalArgumentException.class,\n+ () -> parser.parse(\"type\", new CompressedXContent(mapping))\n+ );\n+ assertThat(e.getMessage(), containsString(\"name cannot be empty string\"));\n+\n+ // before 5.x\n+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);\n+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();\n+ indexService = createIndex(\"test_old\", oldIndexSettings);\n+ parser = indexService.mapperService().documentMapperParser();\n+\n+ DocumentMapper defaultMapper = parser.parse(\"type\", new CompressedXContent(mapping));\n+ assertEquals(mapping, defaultMapper.mappingSource().string());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java", "status": "modified" }, { "diff": "@@ -23,10 +23,13 @@\n import org.elasticsearch.common.compress.CompressedXContent;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n+import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.common.xcontent.XContentParser;\n import org.elasticsearch.common.xcontent.json.JsonXContent;\n+import org.elasticsearch.index.IndexService;\n import org.elasticsearch.index.mapper.CompletionFieldMapper2x;\n import org.elasticsearch.index.mapper.DocumentMapper;\n+import org.elasticsearch.index.mapper.DocumentMapperParser;\n import org.elasticsearch.index.mapper.FieldMapper;\n import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.test.ESSingleNodeTestCase;\n@@ -39,6 +42,7 @@\n \n import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;\n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n+import static org.hamcrest.Matchers.containsString;\n import static org.hamcrest.Matchers.instanceOf;\n import static org.hamcrest.Matchers.is;\n \n@@ -134,4 +138,19 @@ public void testThatSerializationCombinesToOneAnalyzerFieldIfBothAreEqual() thro\n assertThat(configMap.get(\"analyzer\").toString(), is(\"simple\"));\n }\n \n+ public void testEmptyName() throws IOException {\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\").startObject(\"\").field(\"type\", \"completion\").endObject().endObject()\n+ .endObject().endObject().string();\n+\n+ DocumentMapper defaultMapper = createIndex(\"test\",\n+ Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, PRE2X_VERSION.id).build())\n+ .mapperService().documentMapperParser().parse(\"type\", new CompressedXContent(mapping));\n+\n+ FieldMapper fieldMapper = defaultMapper.mappers().getMapper(\"\");\n+ assertThat(fieldMapper, instanceOf(CompletionFieldMapper2x.class));\n+\n+ CompletionFieldMapper2x completionFieldMapper = (CompletionFieldMapper2x) fieldMapper;\n+ assertThat(completionFieldMapper.isStoringPayloads(), is(false));\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapper2xTests.java", "status": "modified" }, { "diff": "@@ -35,9 +35,11 @@\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.common.xcontent.json.JsonXContent;\n+import org.elasticsearch.index.IndexService;\n import org.elasticsearch.index.analysis.NamedAnalyzer;\n import org.elasticsearch.index.mapper.CompletionFieldMapper;\n import org.elasticsearch.index.mapper.DocumentMapper;\n+import 
org.elasticsearch.index.mapper.DocumentMapperParser;\n import org.elasticsearch.index.mapper.FieldMapper;\n import org.elasticsearch.index.mapper.MappedFieldType;\n import org.elasticsearch.index.mapper.MapperParsingException;\n@@ -427,4 +429,17 @@ private static void assertSuggestFields(IndexableField[] fields, int expected) {\n }\n assertThat(actualFieldCount, equalTo(expected));\n }\n+\n+ public void testEmptyName() throws IOException {\n+ IndexService indexService = createIndex(\"test\");\n+ DocumentMapperParser parser = indexService.mapperService().documentMapperParser();\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\").startObject(\"\").field(\"type\", \"completion\").endObject().endObject()\n+ .endObject().endObject().string();\n+\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> parser.parse(\"type\", new CompressedXContent(mapping))\n+ );\n+ assertThat(e.getMessage(), containsString(\"name cannot be empty string\"));\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java", "status": "modified" }, { "diff": "@@ -21,18 +21,29 @@\n \n import org.apache.lucene.index.DocValuesType;\n import org.apache.lucene.index.IndexableField;\n+import org.elasticsearch.Version;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.compress.CompressedXContent;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.xcontent.ToXContent;\n+import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.index.IndexService;\n import org.elasticsearch.index.mapper.DocumentMapper;\n import org.elasticsearch.index.mapper.DocumentMapperParser;\n+import org.elasticsearch.index.mapper.FieldMapper;\n import org.elasticsearch.index.mapper.MapperParsingException;\n import org.elasticsearch.index.mapper.ParsedDocument;\n+import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.test.ESSingleNodeTestCase;\n+import org.elasticsearch.test.InternalSettingsPlugin;\n+import org.elasticsearch.test.VersionUtils;\n import org.junit.Before;\n \n import java.io.IOException;\n+import java.util.Collection;\n \n+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;\n import static org.hamcrest.Matchers.containsString;\n \n public class DateFieldMapperTests extends ESSingleNodeTestCase {\n@@ -46,6 +57,11 @@ public void before() {\n parser = indexService.mapperService().documentMapperParser();\n }\n \n+ @Override\n+ protected Collection<Class<? 
extends Plugin>> getPlugins() {\n+ return pluginList(InternalSettingsPlugin.class);\n+ }\n+\n public void testDefaults() throws Exception {\n String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n .startObject(\"properties\").startObject(\"field\").field(\"type\", \"date\").endObject().endObject()\n@@ -317,4 +333,25 @@ public void testNullConfigValuesFail() throws MapperParsingException, IOExceptio\n Exception e = expectThrows(MapperParsingException.class, () -> parser.parse(\"type\", new CompressedXContent(mapping)));\n assertEquals(\"[format] must not have a [null] value\", e.getMessage());\n }\n+\n+ public void testEmptyName() throws IOException {\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\").startObject(\"\").field(\"type\", \"date\")\n+ .field(\"format\", \"epoch_second\").endObject().endObject()\n+ .endObject().endObject().string();\n+\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> parser.parse(\"type\", new CompressedXContent(mapping))\n+ );\n+ assertThat(e.getMessage(), containsString(\"name cannot be empty string\"));\n+\n+ // before 5.x\n+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);\n+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();\n+ indexService = createIndex(\"test_old\", oldIndexSettings);\n+ parser = indexService.mapperService().documentMapperParser();\n+\n+ DocumentMapper defaultMapper = parser.parse(\"type\", new CompressedXContent(mapping));\n+ assertEquals(mapping, defaultMapper.mappingSource().toString());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java", "status": "modified" }, { "diff": "@@ -23,21 +23,26 @@\n import java.nio.charset.StandardCharsets;\n import java.util.ArrayList;\n import java.util.Arrays;\n+import java.util.Collection;\n import java.util.Collections;\n import java.util.HashSet;\n import java.util.List;\n import java.util.Set;\n \n import org.apache.lucene.index.IndexableField;\n import org.elasticsearch.common.bytes.BytesArray;\n+import org.elasticsearch.Version;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.compress.CompressedXContent;\n import org.elasticsearch.common.lucene.all.AllField;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.index.IndexService;\n import org.elasticsearch.index.mapper.ParseContext.Document;\n+import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.test.ESSingleNodeTestCase;\n+import org.elasticsearch.test.InternalSettingsPlugin;\n \n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;\n@@ -48,6 +53,11 @@\n // TODO: make this a real unit test\n public class DocumentParserTests extends ESSingleNodeTestCase {\n \n+ @Override\n+ protected Collection<Class<? 
extends Plugin>> getPlugins() {\n+ return pluginList(InternalSettingsPlugin.class);\n+ }\n+\n public void testTypeDisabled() throws Exception {\n DocumentMapperParser mapperParser = createIndex(\"test\").mapperService().documentMapperParser();\n String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n@@ -183,7 +193,8 @@ DocumentMapper createDummyMapping(MapperService mapperService) throws Exception\n \n // creates an object mapper, which is about 100x harder than it should be....\n ObjectMapper createObjectMapper(MapperService mapperService, String name) throws Exception {\n- ParseContext context = new ParseContext.InternalParseContext(Settings.EMPTY,\n+ ParseContext context = new ParseContext.InternalParseContext(\n+ Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(),\n mapperService.documentMapperParser(), mapperService.documentMapper(\"type\"), null, null);\n String[] nameParts = name.split(\"\\\\.\");\n for (int i = 0; i < nameParts.length - 1; ++i) {", "filename": "core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java", "status": "modified" }, { "diff": "@@ -48,6 +48,7 @@\n import java.util.Map;\n import java.lang.NumberFormatException;\n \n+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;\n import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;\n import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode;\n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n@@ -893,4 +894,29 @@ public void testMultiField() throws Exception {\n searchResponse = client().prepareSearch().addStoredField(\"location.latlon\").setQuery(matchAllQuery()).execute().actionGet();\n assertEquals(numDocs, searchResponse.getHits().totalHits());\n }\n+\n+\n+ public void testEmptyName() throws Exception {\n+ // after 5.x\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\").startObject(\"\").field(\"type\", \"geo_point\").field(\"lat_lon\", true).endObject().endObject()\n+ .endObject().endObject().string();\n+\n+ Version version = Version.CURRENT;\n+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();\n+ DocumentMapperParser parser = createIndex(\"test\", settings).mapperService().documentMapperParser();\n+\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> parser.parse(\"type\", new CompressedXContent(mapping))\n+ );\n+ assertThat(e.getMessage(), containsString(\"name cannot be empty string\"));\n+\n+ // before 5.x\n+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);\n+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();\n+ DocumentMapperParser parser2x = createIndex(\"test_old\", oldIndexSettings).mapperService().documentMapperParser();\n+\n+ DocumentMapper defaultMapper = parser2x.parse(\"type\", new CompressedXContent(mapping));\n+ assertEquals(mapping, defaultMapper.mappingSource().string());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java", "status": "modified" }, { "diff": "@@ -22,24 +22,38 @@\n import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;\n import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;\n import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;\n+import 
org.elasticsearch.Version;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.compress.CompressedXContent;\n import org.elasticsearch.common.geo.GeoUtils;\n import org.elasticsearch.common.geo.builders.ShapeBuilder;\n+import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.index.mapper.DocumentMapper;\n import org.elasticsearch.index.mapper.DocumentMapperParser;\n import org.elasticsearch.index.mapper.FieldMapper;\n import org.elasticsearch.index.mapper.GeoShapeFieldMapper;\n import org.elasticsearch.index.mapper.MapperService;\n+import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.test.ESSingleNodeTestCase;\n+import org.elasticsearch.test.InternalSettingsPlugin;\n+import org.elasticsearch.test.VersionUtils;\n \n import java.io.IOException;\n+import java.util.Collection;\n \n+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;\n import static org.hamcrest.Matchers.containsString;\n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.instanceOf;\n \n public class GeoShapeFieldMapperTests extends ESSingleNodeTestCase {\n+\n+ @Override\n+ protected Collection<Class<? extends Plugin>> getPlugins() {\n+ return pluginList(InternalSettingsPlugin.class);\n+ }\n+\n public void testDefaultConfiguration() throws IOException {\n String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type1\")\n .startObject(\"properties\").startObject(\"location\")\n@@ -423,4 +437,30 @@ public void testGeoShapeMapperMerge() throws Exception {\n assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.geoHashLevelsForPrecision(1d)));\n assertThat(geoShapeFieldMapper.fieldType().orientation(), equalTo(ShapeBuilder.Orientation.CW));\n }\n+\n+ public void testEmptyName() throws Exception {\n+ // after 5.x\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type1\")\n+ .startObject(\"properties\").startObject(\"\")\n+ .field(\"type\", \"geo_shape\")\n+ .endObject().endObject()\n+ .endObject().endObject().string();\n+ DocumentMapperParser parser = createIndex(\"test\").mapperService().documentMapperParser();\n+\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> parser.parse(\"type1\", new CompressedXContent(mapping))\n+ );\n+ assertThat(e.getMessage(), containsString(\"name cannot be empty string\"));\n+\n+ // before 5.x\n+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);\n+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();\n+ DocumentMapperParser parser2x = createIndex(\"test_old\", oldIndexSettings).mapperService().documentMapperParser();\n+\n+ e = expectThrows(IllegalArgumentException.class,\n+ () -> parser2x.parse(\"type1\", new CompressedXContent(mapping))\n+ );\n+ assertThat(e.getMessage(), containsString(\"fieldName is required\"));\n+ }\n+\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java", "status": "modified" }, { "diff": "@@ -23,8 +23,11 @@\n import org.apache.lucene.index.DocValuesType;\n import org.apache.lucene.index.IndexableField;\n import org.apache.lucene.util.BytesRef;\n+import org.elasticsearch.Version;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.compress.CompressedXContent;\n import 
org.elasticsearch.common.network.InetAddresses;\n+import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.ToXContent;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n@@ -34,13 +37,18 @@\n import org.elasticsearch.index.mapper.IpFieldMapper;\n import org.elasticsearch.index.mapper.MapperParsingException;\n import org.elasticsearch.index.mapper.ParsedDocument;\n+import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.test.ESSingleNodeTestCase;\n+import org.elasticsearch.test.InternalSettingsPlugin;\n+import org.elasticsearch.test.VersionUtils;\n import org.junit.Before;\n \n+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;\n import static org.hamcrest.Matchers.containsString;\n \n import java.io.IOException;\n import java.net.InetAddress;\n+import java.util.Collection;\n \n public class IpFieldMapperTests extends ESSingleNodeTestCase {\n \n@@ -53,6 +61,11 @@ public void before() {\n parser = indexService.mapperService().documentMapperParser();\n }\n \n+ @Override\n+ protected Collection<Class<? extends Plugin>> getPlugins() {\n+ return pluginList(InternalSettingsPlugin.class);\n+ }\n+\n public void testDefaults() throws Exception {\n String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n .startObject(\"properties\").startObject(\"field\").field(\"type\", \"ip\").endObject().endObject()\n@@ -290,4 +303,24 @@ public void testSerializeDefaults() throws Exception {\n assertTrue(got, got.contains(\"\\\"ignore_malformed\\\":false\"));\n assertTrue(got, got.contains(\"\\\"include_in_all\\\":false\"));\n }\n+\n+ public void testEmptyName() throws IOException {\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\").startObject(\"\").field(\"type\", \"ip\").endObject().endObject()\n+ .endObject().endObject().string();\n+\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> parser.parse(\"type\", new CompressedXContent(mapping))\n+ );\n+ assertThat(e.getMessage(), containsString(\"name cannot be empty string\"));\n+\n+ // before 5.x\n+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);\n+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();\n+ indexService = createIndex(\"test_old\", oldIndexSettings);\n+ parser = indexService.mapperService().documentMapperParser();\n+\n+ DocumentMapper defaultMapper = parser.parse(\"type\", new CompressedXContent(mapping));\n+ assertEquals(mapping, defaultMapper.mappingSource().string());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java", "status": "modified" }, { "diff": "@@ -28,20 +28,26 @@\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.compress.CompressedXContent;\n import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.xcontent.ToXContent;\n+import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.index.IndexService;\n import org.elasticsearch.index.mapper.DocumentMapper;\n import org.elasticsearch.index.mapper.DocumentMapperParser;\n+import org.elasticsearch.index.mapper.FieldMapper;\n import org.elasticsearch.index.mapper.ParsedDocument;\n import 
org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.test.ESSingleNodeTestCase;\n import org.elasticsearch.test.InternalSettingsPlugin;\n+import org.elasticsearch.test.VersionUtils;\n import org.junit.Before;\n \n import java.io.IOException;\n import java.util.Arrays;\n import java.util.Collection;\n \n+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;\n+import static org.hamcrest.Matchers.containsString;\n import static org.hamcrest.Matchers.equalTo;\n \n public class KeywordFieldMapperTests extends ESSingleNodeTestCase {\n@@ -77,7 +83,7 @@ public void testDefaults() throws Exception {\n \n IndexableField[] fields = doc.rootDoc().getFields(\"field\");\n assertEquals(2, fields.length);\n- \n+\n assertEquals(new BytesRef(\"1234\"), fields[0].binaryValue());\n IndexableFieldType fieldType = fields[0].fieldType();\n assertThat(fieldType.omitNorms(), equalTo(true));\n@@ -304,4 +310,25 @@ public void testEnableNorms() throws IOException {\n assertEquals(2, fields.length);\n assertFalse(fields[0].fieldType().omitNorms());\n }\n+\n+ public void testEmptyName() throws IOException {\n+ // after 5.x\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\").startObject(\"\").field(\"type\", \"keyword\").endObject().endObject()\n+ .endObject().endObject().string();\n+\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> parser.parse(\"type\", new CompressedXContent(mapping))\n+ );\n+ assertThat(e.getMessage(), containsString(\"name cannot be empty string\"));\n+\n+ // before 5.x\n+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);\n+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();\n+ indexService = createIndex(\"test_old\", oldIndexSettings);\n+ parser = indexService.mapperService().documentMapperParser();\n+\n+ DocumentMapper defaultMapper = parser.parse(\"type\", new CompressedXContent(mapping));\n+ assertEquals(mapping, defaultMapper.mappingSource().string());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java", "status": "modified" }, { "diff": "@@ -21,21 +21,32 @@\n \n import org.apache.lucene.index.DocValuesType;\n import org.apache.lucene.index.IndexableField;\n+import org.elasticsearch.Version;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.compress.CompressedXContent;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.xcontent.ToXContent;\n+import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.index.IndexService;\n import org.elasticsearch.index.mapper.DocumentMapper;\n import org.elasticsearch.index.mapper.DocumentMapperParser;\n+import org.elasticsearch.index.mapper.FieldMapper;\n import org.elasticsearch.index.mapper.MapperParsingException;\n import org.elasticsearch.index.mapper.ParsedDocument;\n+import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.test.ESSingleNodeTestCase;\n+import org.elasticsearch.test.InternalSettingsPlugin;\n+import org.elasticsearch.test.VersionUtils;\n import org.junit.Before;\n \n import java.io.IOException;\n import java.util.Arrays;\n+import java.util.Collection;\n import java.util.HashSet;\n import java.util.Set;\n \n+import static 
com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;\n import static org.hamcrest.Matchers.containsString;\n \n public class NumberFieldMapperTests extends ESSingleNodeTestCase {\n@@ -57,6 +68,11 @@ public void testDefaults() throws Exception {\n }\n }\n \n+ @Override\n+ protected Collection<Class<? extends Plugin>> getPlugins() {\n+ return pluginList(InternalSettingsPlugin.class);\n+ }\n+\n public void doTestDefaults(String type) throws Exception {\n String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n .startObject(\"properties\").startObject(\"field\").field(\"type\", type).endObject().endObject()\n@@ -377,4 +393,32 @@ private void doTestNullValue(String type) throws IOException {\n assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType());\n assertFalse(dvField.fieldType().stored());\n }\n+\n+ public void testEmptyName() throws IOException {\n+ // after version 5\n+ for (String type : TYPES) {\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\").startObject(\"\").field(\"type\", type).endObject().endObject()\n+ .endObject().endObject().string();\n+\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> parser.parse(\"type\", new CompressedXContent(mapping))\n+ );\n+ assertThat(e.getMessage(), containsString(\"name cannot be empty string\"));\n+ }\n+\n+ // before 5.x\n+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);\n+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();\n+ indexService = createIndex(\"test_old\", oldIndexSettings);\n+ parser = indexService.mapperService().documentMapperParser();\n+ for (String type : TYPES) {\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\").startObject(\"\").field(\"type\", type).endObject().endObject()\n+ .endObject().endObject().string();\n+\n+ DocumentMapper defaultMapper = parser.parse(\"type\", new CompressedXContent(mapping));\n+ assertEquals(mapping, defaultMapper.mappingSource().string());\n+ }\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java", "status": "modified" }, { "diff": "@@ -19,17 +19,26 @@\n \n package org.elasticsearch.index.mapper;\n \n+import org.elasticsearch.Version;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.bytes.BytesArray;\n import org.elasticsearch.common.compress.CompressedXContent;\n+import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.index.mapper.DocumentMapper;\n+import org.elasticsearch.index.mapper.DocumentMapperParser;\n import org.elasticsearch.index.mapper.MapperParsingException;\n import org.elasticsearch.index.mapper.MapperService.MergeReason;\n import org.elasticsearch.index.mapper.ObjectMapper.Dynamic;\n+import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.test.ESSingleNodeTestCase;\n+import org.elasticsearch.test.InternalSettingsPlugin;\n+import org.elasticsearch.test.VersionUtils;\n \n import java.io.IOException;\n+import java.util.Collection;\n \n+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;\n import static org.hamcrest.Matchers.containsString;\n \n public class ObjectMapperTests extends ESSingleNodeTestCase {\n@@ -182,4 +191,36 @@ public void 
testMerge() throws IOException {\n assertFalse(mapper.root().includeInAll());\n assertEquals(Dynamic.STRICT, mapper.root().dynamic());\n }\n+\n+ public void testEmptyName() throws Exception {\n+ String mapping = XContentFactory.jsonBuilder()\n+ .startObject()\n+ .startObject(\"\")\n+ .startObject(\"properties\")\n+ .startObject(\"name\")\n+ .field(\"type\", \"text\")\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .string();\n+\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {\n+ createIndex(\"test\").mapperService().documentMapperParser().parse(\"\", new CompressedXContent(mapping));\n+ });\n+ assertThat(e.getMessage(), containsString(\"name cannot be empty string\"));\n+\n+ // before 5.x\n+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);\n+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();\n+ DocumentMapperParser parser = createIndex(\"test_old\", oldIndexSettings).mapperService().documentMapperParser();\n+\n+ DocumentMapper defaultMapper = parser.parse(\"\", new CompressedXContent(mapping));\n+ assertEquals(mapping, defaultMapper.mappingSource().string());\n+ }\n+\n+ @Override\n+ protected Collection<Class<? extends Plugin>> getPlugins() {\n+ return pluginList(InternalSettingsPlugin.class);\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java", "status": "modified" }, { "diff": "@@ -21,18 +21,29 @@\n \n import org.apache.lucene.index.DocValuesType;\n import org.apache.lucene.index.IndexableField;\n+import org.elasticsearch.Version;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.compress.CompressedXContent;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.xcontent.ToXContent;\n+import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.index.IndexService;\n import org.elasticsearch.index.mapper.DocumentMapper;\n import org.elasticsearch.index.mapper.DocumentMapperParser;\n+import org.elasticsearch.index.mapper.FieldMapper;\n import org.elasticsearch.index.mapper.MapperParsingException;\n import org.elasticsearch.index.mapper.ParsedDocument;\n+import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.test.ESSingleNodeTestCase;\n+import org.elasticsearch.test.InternalSettingsPlugin;\n+import org.elasticsearch.test.VersionUtils;\n import org.junit.Before;\n \n import java.io.IOException;\n+import java.util.Collection;\n \n+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;\n import static org.hamcrest.Matchers.containsString;\n \n public class ScaledFloatFieldMapperTests extends ESSingleNodeTestCase {\n@@ -46,6 +57,11 @@ public void before() {\n parser = indexService.mapperService().documentMapperParser();\n }\n \n+ @Override\n+ protected Collection<Class<? 
extends Plugin>> getPlugins() {\n+ return pluginList(InternalSettingsPlugin.class);\n+ }\n+\n public void testDefaults() throws Exception {\n String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n .startObject(\"properties\").startObject(\"field\").field(\"type\", \"scaled_float\")\n@@ -336,4 +352,27 @@ public void testNullValue() throws IOException {\n assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType());\n assertFalse(dvField.fieldType().stored());\n }\n+\n+ public void testEmptyName() throws IOException {\n+ // after 5.x\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\").startObject(\"\")\n+ .field(\"type\", \"scaled_float\")\n+ .field(\"scaling_factor\", 10.0).endObject().endObject()\n+ .endObject().endObject().string();\n+\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> parser.parse(\"type\", new CompressedXContent(mapping))\n+ );\n+ assertThat(e.getMessage(), containsString(\"name cannot be empty string\"));\n+\n+ // before 5.x\n+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);\n+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();\n+ indexService = createIndex(\"test_old\", oldIndexSettings);\n+ parser = indexService.mapperService().documentMapperParser();\n+\n+ DocumentMapper defaultMapper = parser.parse(\"type\", new CompressedXContent(mapping));\n+ assertEquals(mapping, defaultMapper.mappingSource().toString());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java", "status": "modified" }, { "diff": "@@ -28,29 +28,38 @@\n import org.apache.lucene.index.Term;\n import org.apache.lucene.index.TermsEnum;\n import org.apache.lucene.util.BytesRef;\n+import org.elasticsearch.Version;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.compress.CompressedXContent;\n+import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.ToXContent;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.index.IndexService;\n import org.elasticsearch.index.engine.Engine;\n import org.elasticsearch.index.mapper.DocumentMapper;\n import org.elasticsearch.index.mapper.DocumentMapperParser;\n+import org.elasticsearch.index.mapper.FieldMapper;\n import org.elasticsearch.index.mapper.MapperParsingException;\n import org.elasticsearch.index.mapper.MapperService.MergeReason;\n import org.elasticsearch.index.mapper.TextFieldMapper.TextFieldType;\n import org.elasticsearch.index.mapper.ParsedDocument;\n import org.elasticsearch.index.mapper.TextFieldMapper;\n import org.elasticsearch.index.shard.IndexShard;\n+import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.test.ESSingleNodeTestCase;\n+import org.elasticsearch.test.InternalSettingsPlugin;\n+import org.elasticsearch.test.VersionUtils;\n import org.junit.Before;\n \n import java.io.IOException;\n+import java.util.Collection;\n import java.util.Collections;\n import java.util.Arrays;\n import java.util.HashMap;\n import java.util.Map;\n \n+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;\n import static org.hamcrest.Matchers.containsString;\n import static org.hamcrest.Matchers.equalTo;\n \n@@ -65,6 +74,11 @@ public void before() 
{\n parser = indexService.mapperService().documentMapperParser();\n }\n \n+ @Override\n+ protected Collection<Class<? extends Plugin>> getPlugins() {\n+ return pluginList(InternalSettingsPlugin.class);\n+ }\n+\n public void testDefaults() throws Exception {\n String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n .startObject(\"properties\").startObject(\"field\").field(\"type\", \"text\").endObject().endObject()\n@@ -549,4 +563,25 @@ public void testAnalyzedFieldPositionIncrementWithoutPositions() throws IOExcept\n assertEquals(\"Cannot set position_increment_gap on field [field] without positions enabled\", e.getMessage());\n }\n }\n+\n+ public void testEmptyName() throws IOException {\n+ // after 5.x\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\").startObject(\"\").field(\"type\", \"text\").endObject().endObject()\n+ .endObject().endObject().string();\n+\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> parser.parse(\"type\", new CompressedXContent(mapping))\n+ );\n+ assertThat(e.getMessage(), containsString(\"name cannot be empty string\"));\n+\n+ // before 5.x\n+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);\n+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();\n+ indexService = createIndex(\"test_old\", oldIndexSettings);\n+ parser = indexService.mapperService().documentMapperParser();\n+\n+ DocumentMapper defaultMapper = parser.parse(\"type\", new CompressedXContent(mapping));\n+ assertEquals(mapping, defaultMapper.mappingSource().string());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java", "status": "modified" }, { "diff": "@@ -24,17 +24,29 @@\n import org.apache.lucene.analysis.MockTokenizer;\n import org.apache.lucene.analysis.Token;\n import org.apache.lucene.analysis.TokenStream;\n+import org.elasticsearch.Version;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.compress.CompressedXContent;\n+import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentFactory;\n+import org.elasticsearch.index.IndexService;\n import org.elasticsearch.index.mapper.DocumentMapper;\n+import org.elasticsearch.index.mapper.DocumentMapperParser;\n import org.elasticsearch.index.mapper.MapperService;\n import org.elasticsearch.index.mapper.TokenCountFieldMapper;\n+import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.test.ESSingleNodeTestCase;\n+import org.elasticsearch.test.InternalSettingsPlugin;\n+import org.elasticsearch.test.VersionUtils;\n+import org.junit.Before;\n \n import java.io.IOException;\n import java.util.Arrays;\n+import java.util.Collection;\n import java.util.Collections;\n \n+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;\n+import static org.hamcrest.Matchers.containsString;\n import static org.hamcrest.Matchers.equalTo;\n \n /**\n@@ -94,4 +106,31 @@ public TokenStreamComponents createComponents(String fieldName) {\n };\n assertThat(TokenCountFieldMapper.countPositions(analyzer, \"\", \"\"), equalTo(7));\n }\n+\n+ @Override\n+ protected Collection<Class<? 
extends Plugin>> getPlugins() {\n+ return pluginList(InternalSettingsPlugin.class);\n+ }\n+\n+ public void testEmptyName() throws IOException {\n+ IndexService indexService = createIndex(\"test\");\n+ DocumentMapperParser parser = indexService.mapperService().documentMapperParser();\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\").startObject(\"\").field(\"type\", \"text\").endObject().endObject()\n+ .endObject().endObject().string();\n+\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> parser.parse(\"type\", new CompressedXContent(mapping))\n+ );\n+ assertThat(e.getMessage(), containsString(\"name cannot be empty string\"));\n+\n+ // before 5.x\n+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);\n+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();\n+ indexService = createIndex(\"test_old\", oldIndexSettings);\n+ DocumentMapperParser parser2x = indexService.mapperService().documentMapperParser();\n+\n+ DocumentMapper defaultMapper = parser2x.parse(\"type\", new CompressedXContent(mapping));\n+ assertEquals(mapping, defaultMapper.mappingSource().string());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java", "status": "modified" }, { "diff": "@@ -18,16 +18,26 @@\n */\n package org.elasticsearch.indexing;\n \n+import org.elasticsearch.Version;\n import org.elasticsearch.action.DocWriteResponse;\n import org.elasticsearch.action.bulk.BulkResponse;\n import org.elasticsearch.action.index.IndexRequestBuilder;\n import org.elasticsearch.action.index.IndexResponse;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.index.VersionType;\n+import org.elasticsearch.index.mapper.MapperParsingException;\n import org.elasticsearch.indices.InvalidIndexNameException;\n+import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.test.ESIntegTestCase;\n+import org.elasticsearch.test.InternalSettingsPlugin;\n+import org.elasticsearch.test.VersionUtils;\n \n import java.util.ArrayList;\n+import java.util.Collection;\n+import java.util.Collections;\n import java.util.List;\n import java.util.Locale;\n import java.util.Random;\n@@ -36,7 +46,9 @@\n import java.util.concurrent.Executors;\n import java.util.concurrent.atomic.AtomicIntegerArray;\n \n+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;\n+import static org.hamcrest.Matchers.containsString;\n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.lessThanOrEqualTo;\n \n@@ -227,4 +239,27 @@ public void testInvalidIndexName() {\n }\n }\n \n+ public void testDocumentWithBlankFieldName() {\n+ MapperParsingException e = expectThrows(MapperParsingException.class, () -> {\n+ client().prepareIndex(\"test\", \"type\", \"1\").setSource(\"\", \"value1_2\").execute().actionGet();\n+ }\n+ );\n+ assertThat(e.getMessage(), containsString(\"failed to parse\"));\n+ assertThat(e.getRootCause().getMessage(), containsString(\"name cannot be empty string\"));\n+ }\n+\n+ @Override\n+ protected Collection<Class<? 
extends Plugin>> nodePlugins() {\n+ return Collections.singleton(InternalSettingsPlugin.class); // uses index.version.created\n+ }\n+\n+ public void testDocumentWithBlankFieldName2x() {\n+ Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_4);\n+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();\n+ assertAcked(prepareCreate(\"test1\").setSettings(settings));\n+ ensureGreen();\n+\n+ IndexResponse indexResponse = client().prepareIndex(\"test1\", \"type\", \"1\").setSource(\"\", \"value1_2\").execute().actionGet();\n+ assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java", "status": "modified" }, { "diff": "@@ -21,6 +21,8 @@\n \n import org.apache.lucene.search.SortField;\n import org.apache.lucene.util.Accountable;\n+import org.elasticsearch.Version;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.ParseFieldMatcher;\n import org.elasticsearch.cluster.ClusterState;\n import org.elasticsearch.common.io.stream.BytesStreamOutput;\n@@ -218,7 +220,8 @@ public void testEqualsAndHashcode() throws IOException {\n \n protected QueryShardContext createMockShardContext() {\n Index index = new Index(randomAsciiOfLengthBetween(1, 10), \"_na_\");\n- IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(index, Settings.EMPTY);\n+ IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(index,\n+ Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());\n IndicesFieldDataCache cache = new IndicesFieldDataCache(Settings.EMPTY, null);\n IndexFieldDataService ifds = new IndexFieldDataService(IndexSettingsModule.newIndexSettings(\"test\", Settings.EMPTY),\n cache, null, null);\n@@ -241,8 +244,8 @@ public MappedFieldType fieldMapper(String name) {\n \n @Override\n public ObjectMapper getObjectMapper(String name) {\n- BuilderContext context = new BuilderContext(Settings.EMPTY, new ContentPath());\n- return new ObjectMapper.Builder<>(name).nested(Nested.newNested(false, false)).build(context);\n+ BuilderContext context = new BuilderContext(this.getIndexSettings().getSettings(), new ContentPath());\n+ return (ObjectMapper) new ObjectMapper.Builder<>(name).nested(Nested.newNested(false, false)).build(context);\n }\n };\n }", "filename": "core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java", "status": "modified" }, { "diff": "@@ -262,3 +262,7 @@ IMPORTANT: Keep in mind that deleting documents from an index is very expensive\n compared to deleting whole indexes. 
That is why time based indexes are\n recommended over this sort of thing and why `_ttl` was deprecated in the first\n place.\n+\n+==== Blank field names is not supported\n+\n+Blank field names in mappings is not allowed after 5.0.", "filename": "docs/reference/migration/migrate_5_0/mapping.asciidoc", "status": "modified" }, { "diff": "@@ -33,12 +33,15 @@\n import org.apache.lucene.search.TermQuery;\n import org.apache.lucene.search.TermRangeQuery;\n import org.apache.lucene.util.BytesRef;\n+import org.elasticsearch.Version;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.compress.CompressedXContent;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.common.xcontent.XContentParser;\n import org.elasticsearch.index.IndexService;\n import org.elasticsearch.index.mapper.DocumentMapper;\n+import org.elasticsearch.index.mapper.DocumentMapperParser;\n import org.elasticsearch.index.mapper.MapperParsingException;\n import org.elasticsearch.index.mapper.MapperService;\n import org.elasticsearch.index.mapper.ParseContext;\n@@ -56,6 +59,8 @@\n import org.elasticsearch.indices.TermsLookup;\n import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.test.ESSingleNodeTestCase;\n+import org.elasticsearch.test.InternalSettingsPlugin;\n+import org.elasticsearch.test.VersionUtils;\n import org.junit.Before;\n \n import java.io.IOException;\n@@ -65,6 +70,7 @@\n import java.util.Collections;\n import java.util.List;\n \n+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;\n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;\n import static org.elasticsearch.index.query.QueryBuilders.matchPhraseQuery;\n@@ -91,7 +97,7 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {\n \n @Override\n protected Collection<Class<? 
extends Plugin>> getPlugins() {\n- return Collections.singleton(PercolatorPlugin.class);\n+ return pluginList(InternalSettingsPlugin.class, PercolatorPlugin.class);\n }\n \n @Before\n@@ -454,4 +460,26 @@ private void assertQueryBuilder(BytesRef actual, QueryBuilder expected) throws I\n QueryParseContext qsc = indexService.newQueryShardContext().newParseContext(sourceParser);\n assertThat(qsc.parseInnerQueryBuilder().get(), equalTo(expected));\n }\n+\n+\n+ public void testEmptyName() throws Exception {\n+ // after 5.x\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type1\")\n+ .startObject(\"properties\").startObject(\"\").field(\"type\", \"percolator\").endObject().endObject()\n+ .endObject().endObject().string();\n+ DocumentMapperParser parser = mapperService.documentMapperParser();\n+\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> parser.parse(\"type1\", new CompressedXContent(mapping))\n+ );\n+ assertThat(e.getMessage(), containsString(\"name cannot be empty string\"));\n+\n+ // before 5.x\n+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);\n+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();\n+ DocumentMapperParser parser2x = createIndex(\"test_old\", oldIndexSettings).mapperService().documentMapperParser();\n+\n+ DocumentMapper defaultMapper = parser2x.parse(\"type1\", new CompressedXContent(mapping));\n+ assertEquals(mapping, defaultMapper.mappingSource().string());\n+ }\n }", "filename": "modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java", "status": "modified" }, { "diff": "@@ -19,6 +19,8 @@\n \n package org.elasticsearch.mapper.attachments;\n \n+import org.elasticsearch.Version;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.compress.CompressedXContent;\n import org.elasticsearch.common.settings.Settings;\n@@ -28,7 +30,9 @@\n import org.elasticsearch.index.mapper.DocumentMapperParser;\n import org.elasticsearch.index.mapper.MapperService;\n import org.elasticsearch.index.mapper.ParseContext;\n+import org.elasticsearch.test.VersionUtils;\n \n+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;\n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;\n import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;\n@@ -125,4 +129,33 @@ public void testMapperErrorWithDotTwoLevels169() throws Exception {\n assertFalse(docMapper.mapping().toString().contains(\".\"));\n }\n \n+ public void testEmptyName() throws Exception {\n+ XContentBuilder mappingBuilder = jsonBuilder();\n+ mappingBuilder.startObject()\n+ .startObject(\"mail\")\n+ .startObject(\"properties\")\n+ .startObject(\"\")\n+ .field(\"type\", \"attachment\")\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .endObject();\n+ String mapping = mappingBuilder.string();\n+ MapperService mapperService = MapperTestUtils.newMapperService(createTempDir(), Settings.EMPTY, getIndicesModuleWithRegisteredAttachmentMapper());\n+ DocumentMapperParser parser = mapperService.documentMapperParser();\n+\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> parser.parse(\"mail\", new CompressedXContent(mapping))\n+ );\n+ assertThat(e.getMessage(), 
containsString(\"name cannot be empty string\"));\n+\n+ // before 5.x\n+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);\n+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();\n+ MapperService mapperService2x = MapperTestUtils.newMapperService(createTempDir(), oldIndexSettings, getIndicesModuleWithRegisteredAttachmentMapper());\n+ DocumentMapperParser parser2x = mapperService2x.documentMapperParser();\n+\n+ DocumentMapper defaultMapper = parser2x.parse(\"mail\", new CompressedXContent(mapping));\n+ assertThat(defaultMapper.mappingSource().string(), startsWith(\"{\\\"mail\\\":{\\\"properties\\\":{\\\"\\\":{\\\"type\\\":\\\"attachment\\\"\"));\n+ }\n }", "filename": "plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/SimpleAttachmentMapperTests.java", "status": "modified" }, { "diff": "@@ -22,20 +22,30 @@\n import org.apache.lucene.index.DocValuesType;\n import org.apache.lucene.index.IndexOptions;\n import org.apache.lucene.index.IndexableField;\n+import org.elasticsearch.Version;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.compress.CompressedXContent;\n+import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.index.IndexService;\n import org.elasticsearch.index.mapper.DocumentMapper;\n import org.elasticsearch.index.mapper.DocumentMapperParser;\n import org.elasticsearch.index.mapper.MapperParsingException;\n import org.elasticsearch.index.mapper.ParsedDocument;\n import org.elasticsearch.indices.mapper.MapperRegistry;\n+import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.test.ESSingleNodeTestCase;\n+import org.elasticsearch.test.InternalSettingsPlugin;\n+import org.elasticsearch.test.VersionUtils;\n import org.junit.Before;\n \n import java.util.Arrays;\n+import java.util.Collection;\n import java.util.Collections;\n \n+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;\n+import static org.hamcrest.Matchers.containsString;\n+\n public class Murmur3FieldMapperTests extends ESSingleNodeTestCase {\n \n MapperRegistry mapperRegistry;\n@@ -52,6 +62,11 @@ public void before() {\n indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);\n }\n \n+ @Override\n+ protected Collection<Class<? 
extends Plugin>> getPlugins() {\n+ return pluginList(InternalSettingsPlugin.class);\n+ }\n+\n public void testDefaults() throws Exception {\n String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n .startObject(\"properties\").startObject(\"field\")\n@@ -120,4 +135,27 @@ public void testIndexSettingNotAllowed() throws Exception {\n assertTrue(e.getMessage().contains(\"Setting [index] cannot be modified\"));\n }\n }\n+\n+ public void testEmptyName() throws Exception {\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\").startObject(\"\")\n+ .field(\"type\", \"murmur3\")\n+ .endObject().endObject().endObject().endObject().string();\n+\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> parser.parse(\"type\", new CompressedXContent(mapping))\n+ );\n+ assertThat(e.getMessage(), containsString(\"name cannot be empty string\"));\n+\n+ // before 5.x\n+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);\n+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();\n+ IndexService indexService2x = createIndex(\"test_old\", oldIndexSettings);\n+\n+ DocumentMapperParser parser = new DocumentMapperParser(indexService2x.getIndexSettings(), indexService2x.mapperService(), indexService2x.analysisService(),\n+ indexService2x.similarityService(), mapperRegistry, indexService2x::newQueryShardContext);\n+\n+ DocumentMapper defaultMapper = parser.parse(\"type\", new CompressedXContent(mapping));\n+ assertEquals(mapping, defaultMapper.mappingSource().string());\n+ }\n }", "filename": "plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java", "status": "modified" }, { "diff": "@@ -95,3 +95,26 @@\n - match: {test_index.aliases.test_clias.filter.term.field: value}\n - is_false: test_index.aliases.test_clias.index_routing\n - is_false: test_index.aliases.test_clias.search_routing\n+\n+---\n+\"Create index with no type mappings\":\n+ - do:\n+ catch: /illegal_argument_exception/\n+ indices.create:\n+ index: test_index\n+ body:\n+ mappings:\n+ \"\" : {}\n+\n+---\n+\"Create index with invalid mappings\":\n+ - do:\n+ catch: /illegal_argument_exception/\n+ indices.create:\n+ index: test_index\n+ body:\n+ mappings:\n+ test_type:\n+ properties:\n+ \"\":\n+ type: keyword", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yaml", "status": "modified" }, { "diff": "@@ -51,3 +51,19 @@\n - match: {test_index.mappings.test_type.properties.text1.type: text}\n - match: {test_index.mappings.test_type.properties.subfield.properties.text3.type: text}\n - match: {test_index.mappings.test_type.properties.text1.fields.text_raw.type: keyword}\n+\n+---\n+\"Create index with invalid mappings\":\n+ - do:\n+ indices.create:\n+ index: test_index\n+ - do:\n+ catch: /illegal_argument_exception/\n+ indices.put_mapping:\n+ index: test_index\n+ type: test_type\n+ body:\n+ test_type:\n+ properties:\n+ \"\":\n+ type: keyword", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yaml", "status": "modified" } ] }
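Across the test diffs above, the behaviour under test is the same version-gated rule: an empty field name is rejected with "name cannot be empty string" on indices created on 5.x, while indices created before 5.0 still tolerate it. The stand-alone Java sketch below only illustrates that gate — `validateFieldName` and the `indexCreatedBefore5x` flag are hypothetical stand-ins, not the Elasticsearch mapper code (which keys off `IndexMetaData.SETTING_VERSION_CREATED`).

```
// Hypothetical sketch of the version-gated empty-name check exercised by the testEmptyName() tests above.
public final class FieldNameValidation {

    // indexCreatedBefore5x is a stand-in for inspecting the index's creation version setting.
    static void validateFieldName(String name, boolean indexCreatedBefore5x) {
        if (name.isEmpty() && indexCreatedBefore5x == false) {
            // same message the tests assert on
            throw new IllegalArgumentException("name cannot be empty string");
        }
    }

    public static void main(String[] args) {
        validateFieldName("", true);        // tolerated on a pre-5.0 index
        validateFieldName("title", false);  // a non-empty name is fine on any version
        try {
            validateFieldName("", false);   // rejected on 5.x and later
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // prints: name cannot be empty string
        }
    }
}
```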
{ "body": "POST http://localhost:9200/searchaggtests/searchaggtest/_search?&search_type=count HTTP/1.1\nContent-Type: application/json\nHost: localhost:9200\nContent-Length: 289\nExpect: 100-continue\n\n{\"aggs\":{\"testHistogramBucketAggregation\":{\"date_histogram\":{\"field\":\"dateofdetails\",\"interval\":\"1M\",\"format\":\"yyyy-MM-dd\",\"offset\":\"1d\",\"time_zone\":\"+02:00\",\"order\":{\"dateofdetails\":\"desc\"},\"min_doc_count\":2,\"extended_bounds\":{\"min\":0,\"max\":1000000}},\"aggs\":{\"tophits\":{\"top_hits\":{}}}}}}\n## Result:\n\nHTTP/1.1 503 Service Unavailable\nContent-Type: application/json; charset=UTF-8\nContent-Length: 293\n\n{\"error\":{\"root_cause\":[],\"type\":\"reduce_search_phase_exception\",\"reason\":\"[reduce] \",\"phase\":\"query\",\"grouped\":true,\"failed_shards\":[],\"caused_by\":{\"type\":\"illegal_argument_exception\",\"reason\":\"Invalid order path [dateofdetails]. Cannot find aggregation named [dateofdetails]\"}},\"status\":503}\n\nGreetings Damien\n", "comments": [ { "body": "This is weird for a few reasons... \n- you can't sort on fields, only on the result of aggregations so the exception is correct\n- it shouldn't be returning a 503 response though\n- when I try it (full example below) on 2.0, I don't get this exception, which i should\n \n POST _bulk\n {\"index\":{\"_index\":\"searchtests\",\"_type\":\"searchtest\",\"_id\":1}}\n {\"id\":1,\"name\":\"one\",\"details\":\"This is the details of the document, very interesting\",\"circletest\":{\"type\":\"circle\",\"coordinates\":[45,45],\"radius\":\"100m\"},\"location\":[45,45],\"lift\":2.9,\"dateofdetails\":\"2015-10-26T19:19:38.5961045Z\"}\n {\"index\":{\"_index\":\"searchtests\",\"_type\":\"searchtest\",\"_id\":2}}\n {\"id\":2,\"name\":\"two\",\"details\":\"Details of the document two, leave it alone\",\"circletest\":{\"type\":\"circle\",\"coordinates\":[46,45],\"radius\":\"50m\"},\"location\":[46,45],\"lift\":2.5,\"dateofdetails\":\"2015-04-20T19:19:38.5961045Z\"}\n {\"index\":{\"_index\":\"searchtests\",\"_type\":\"searchtest\",\"_id\":3}}\n {\"id\":3,\"name\":\"three\",\"details\":\"This data is different\",\"circletest\":{\"type\":\"circle\",\"coordinates\":[37,42],\"radius\":\"80m\"},\"location\":[37,42],\"lift\":2.1,\"dateofdetails\":\"2015-10-12T19:19:38.5961045Z\"}\n\nFull example:\n\n```\nPOST searchtests/_search?search_type=count\n{\n \"aggs\": {\n \"testHistogramBucketAggregation\": {\n \"date_histogram\": {\n \"field\": \"dateofdetails\",\n \"interval\": \"1M\",\n \"format\": \"yyyy-MM-dd\",\n \"offset\": \"1d\",\n \"time_zone\": \"+02:00\",\n \"order\": {\n \"dateofdetails\": \"desc\"\n },\n \"min_doc_count\": 2,\n \"extended_bounds\": {\n \"min\": 0,\n \"max\": 1000000\n }\n },\n \"aggs\": {\n \"tophits\": {\n \"top_hits\": {}\n }\n }\n }\n }\n}\n```\n\n@colings86 any ideas?\n", "created_at": "2015-11-18T13:24:39Z" }, { "body": "I get the same as @clintongormley.\n\nThis is a bug because an exception should be thrown and the status code returned should be 400 (bad request).\n", "created_at": "2015-11-18T14:15:21Z" }, { "body": "As of #22343 `histogram` and `date_histogram` aggregation order will now be validated during the shard search phase. 
We now get the same validation errors as the `terms` agg when an invalid sub-aggregation is given in the order.\r\n```\r\nPUT i\r\n\r\nPOST i/_search\r\n{\r\n \"aggs\": {\r\n \"a\": {\r\n \"date_histogram\": {\r\n \"field\": \"f\", \r\n \"interval\": \"day\", \r\n \"order\": {\r\n \"wrong\": \"asc\"\r\n }\r\n }\r\n }\r\n }\r\n}\r\n\r\nresponse:\r\n{\r\n \"error\": {\r\n \"root_cause\": [\r\n {\r\n \"type\": \"aggregation_execution_exception\",\r\n \"reason\": \"Invalid aggregator order path [wrong]. Unknown aggregation [wrong]\"\r\n }\r\n ],\r\n \"type\": \"search_phase_execution_exception\",\r\n \"reason\": \"all shards failed\",\r\n \"phase\": \"query\",\r\n \"grouped\": true,\r\n \"failed_shards\": [\r\n {\r\n \"shard\": 0,\r\n \"index\": \"i\",\r\n \"node\": \"jUa3xSiEQTayFeYCYHt2jg\",\r\n \"reason\": {\r\n \"type\": \"aggregation_execution_exception\",\r\n \"reason\": \"Invalid aggregator order path [wrong]. Unknown aggregation [wrong]\"\r\n }\r\n }\r\n ]\r\n },\r\n \"status\": 500\r\n}\r\n```\r\nThe `order` code was refactored in #22343 so it will now be easier to move order validation for all aggregations to the query parsing phase as a follow up PR.", "created_at": "2017-05-14T02:43:55Z" }, { "body": "@elastic/es-search-aggs ", "created_at": "2018-03-22T17:02:27Z" }, { "body": "Actually, closing in favor of #20003\r\n\r\nSorry for the noise :)", "created_at": "2018-03-22T17:20:07Z" } ], "number": 14771, "title": "Order not working inside a date_histogram aggs ES 2.0" }
{ "body": "Fix wrong status code for `SearchPhaseExecutionException`.\nUpdated `SearchPhaseExecutionException` to determine the status from the cause if one exists and there were no shard failures.\n\nThrow exception if (date) histogram order path is invalid and there is only one bucket.\nForced check of sub-aggregation names and fields used in (date) histogram order path if there is only one bucket. The previous code relied on the sorting code (bypassed if less than 2 buckets) to do this check. Ideally these checks should be performed during parsing instead of the reduce phase.\n\nTests pass: `gradle test` and `gradle core:integTest`\n\nCloses #14771\n", "number": 19851, "review_comments": [ { "body": "Currently if the (date) histogram is ordered by sub-aggregation(s), the order path is validated during the reduce phase. This check occurs implicitly in the [comparator](https://github.com/elastic/elasticsearch/blob/master/core/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketsAggregation.java#L67) when the histogram buckets are sorted. However if there are 0 or 1 buckets, there is nothing to sort so this code was bypassed. I added a hack here to catch the case with 1 bucket. To catch the case with 0 buckets, the validation code needs to be refactored to run during the query parsing phase (if possible). This would require parsing all sub-aggregations first and then validating the order path.\n", "created_at": "2016-08-08T21:44:02Z" }, { "body": "Are there any tests for this method?\nIt looks like the code only expects a SearchPhaseExecutionException only when there are shard failure(s). The bug was that an exception occurred on the coordinating node performing reduce, which resulted in this code assuming all shards failed (503 status instead of the expected root cause status).\n", "created_at": "2016-08-08T21:48:18Z" }, { "body": "Lots of duplicated code between Histogram and DateHistogram :(\n", "created_at": "2016-08-08T21:49:17Z" } ], "title": "Fix wrong status code for SearchPhaseExecutionException" }
{ "commits": [ { "message": "Fix wrong status code for SearchPhaseExecutionException.\nUpdated SearchPhaseExecutionException to determine the status from the cause if one exists and there were no shard failures.\n\nThrow exception if (date) histogram order path is invalid and there is only one bucket.\nForced check of sub-aggregation names and fields used in (date) histogram order path if there is only one bucket. The previous code relied on the sorting code (bypassed if less than 2 buckets) to do this check. Ideally these checks should be performed during parsing instead of the reduce phase.\n\nCloses #14771" } ], "files": [ { "diff": "@@ -85,8 +85,15 @@ private static Throwable deduplicateCause(Throwable cause, ShardSearchFailure[]\n @Override\n public RestStatus status() {\n if (shardFailures.length == 0) {\n- // if no successful shards, it means no active shards, so just return SERVICE_UNAVAILABLE\n- return RestStatus.SERVICE_UNAVAILABLE;\n+ // no successful shard responses or no shard failures\n+ Throwable cause = super.getCause();\n+ if (cause == null) {\n+ // if no successful shards, it means no active shards, so just return SERVICE_UNAVAILABLE\n+ return RestStatus.SERVICE_UNAVAILABLE;\n+ } else {\n+ // no shard failures: exception on node performing reduce\n+ return ExceptionsHelper.status(cause);\n+ }\n }\n RestStatus status = shardFailures[0].status();\n if (shardFailures.length > 1) {\n@@ -150,7 +157,7 @@ protected void innerToXContent(XContentBuilder builder, Params params) throws IO\n @Override\n protected void causeToXContent(XContentBuilder builder, Params params) throws IOException {\n if (super.getCause() != null) {\n- // if the cause is null we inject a guessed root cause that will then be rendered twice so wi disable it manually\n+ // if the cause is null we inject a guessed root cause that will then be rendered twice so we disable it manually\n super.causeToXContent(builder, params);\n }\n }", "filename": "core/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java", "status": "modified" }, { "diff": "@@ -149,6 +149,7 @@ static InternalOrder resolveOrder(String key, boolean asc) {\n if (\"_count\".equals(key)) {\n return (InternalOrder) (asc ? InternalOrder.COUNT_ASC : InternalOrder.COUNT_DESC);\n }\n+ // TODO check for valid sub-aggregation names and fields here instead of reduce phase.\n return new InternalOrder.Aggregation(key, asc);\n }\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java", "status": "modified" }, { "diff": "@@ -141,6 +141,7 @@ static InternalOrder resolveOrder(String key, boolean asc) {\n if (\"_count\".equals(key)) {\n return (InternalOrder) (asc ? 
InternalOrder.COUNT_ASC : InternalOrder.COUNT_DESC);\n }\n+ // TODO check for valid sub-aggregation names and fields here instead of reduce phase.\n return new InternalOrder.Aggregation(key, asc);\n }\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java", "status": "modified" }, { "diff": "@@ -396,6 +396,12 @@ public InternalAggregation doReduce(List<InternalAggregation> aggregations, Redu\n } else {\n // sorted by sub-aggregation, need to fall back to a costly n*log(n) sort\n CollectionUtil.introSort(reducedBuckets, order.comparator());\n+ if (reducedBuckets.size() == 1) {\n+ // hack: force check of sub-aggregation names and fields if there is only 1 bucket (sort code bypassed)\n+ // TODO check for valid sub-aggregation names and fields during parsing instead of reduce phase\n+ Bucket b = reducedBuckets.get(0);\n+ order.comparator().compare(b, b);\n+ }\n }\n \n return new InternalDateHistogram(getName(), reducedBuckets, order, minDocCount, offset, emptyBucketInfo,", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java", "status": "modified" }, { "diff": "@@ -379,6 +379,12 @@ public InternalAggregation doReduce(List<InternalAggregation> aggregations, Redu\n } else {\n // sorted by sub-aggregation, need to fall back to a costly n*log(n) sort\n CollectionUtil.introSort(reducedBuckets, order.comparator());\n+ if (reducedBuckets.size() == 1) {\n+ // hack: force check of sub-aggregation names and fields if there is only 1 bucket (sort code bypassed)\n+ // TODO check for valid sub-aggregation names and fields during parsing instead of reduce phase\n+ Bucket b = reducedBuckets.get(0);\n+ order.comparator().compare(b, b);\n+ }\n }\n \n return new InternalHistogram(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo, format, keyed, pipelineAggregators(),", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java", "status": "modified" }, { "diff": "@@ -18,6 +18,7 @@\n */\n package org.elasticsearch.search.aggregations.bucket;\n \n+import org.elasticsearch.ElasticsearchException;\n import org.elasticsearch.action.index.IndexRequestBuilder;\n import org.elasticsearch.action.search.SearchResponse;\n import org.elasticsearch.common.joda.DateMathParser;\n@@ -66,6 +67,7 @@\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;\n import static org.hamcrest.Matchers.containsString;\n import static org.hamcrest.Matchers.equalTo;\n+import static org.hamcrest.Matchers.instanceOf;\n import static org.hamcrest.Matchers.is;\n import static org.hamcrest.core.IsNull.notNullValue;\n \n@@ -471,6 +473,23 @@ public void testSingleValuedFieldOrderedBySubAggregationDesc() throws Exception\n }\n }\n \n+ public void testSingleValuedFieldOrderedByMissingSubAggregation() throws Exception {\n+ try {\n+ client().prepareSearch(\"idx\")\n+ .addAggregation(dateHistogram(\"histo\")\n+ .field(\"date\")\n+ .dateHistogramInterval(DateHistogramInterval.MONTH)\n+ .order(Histogram.Order.aggregation(\"stats_missing\", \"sum\", false))\n+ .subAggregation(stats(\"stats\").field(\"value\")))\n+ .get();\n+ fail();\n+ } catch (ElasticsearchException ex) {\n+ Throwable rootCause = ex.getRootCause();\n+ assertThat(rootCause, instanceOf(IllegalArgumentException.class));\n+ assertThat(rootCause.getMessage(), containsString(\"Invalid order path\"));\n+ }\n+ }\n+\n public void 
testSingleValuedFieldOrderedByMultiValuedSubAggregationDesc() throws Exception {\n SearchResponse response = client().prepareSearch(\"idx\")\n .addAggregation(dateHistogram(\"histo\")", "filename": "core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java", "status": "modified" }, { "diff": "@@ -19,6 +19,7 @@\n package org.elasticsearch.search.aggregations.bucket;\n \n import com.carrotsearch.hppc.LongHashSet;\n+import org.elasticsearch.ElasticsearchException;\n import org.elasticsearch.action.index.IndexRequestBuilder;\n import org.elasticsearch.action.search.SearchResponse;\n import org.elasticsearch.index.query.QueryBuilders;\n@@ -56,6 +57,7 @@\n import static org.hamcrest.Matchers.containsString;\n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.greaterThanOrEqualTo;\n+import static org.hamcrest.Matchers.instanceOf;\n import static org.hamcrest.Matchers.is;\n import static org.hamcrest.Matchers.lessThanOrEqualTo;\n import static org.hamcrest.core.IsNull.notNullValue;\n@@ -470,6 +472,24 @@ public void testSingleValuedFieldOrderedBySubAggregationDesc() throws Exception\n }\n }\n \n+ public void testSingleValuedFieldOrderedByMissingSubAggregation() throws Exception {\n+ try {\n+ client().prepareSearch(\"idx\")\n+ .addAggregation(\n+ histogram(\"histo\")\n+ .field(SINGLE_VALUED_FIELD_NAME)\n+ .interval(interval)\n+ .order(Histogram.Order.aggregation(\"stats_missing.sum\", false))\n+ .subAggregation(stats(\"stats\").field(SINGLE_VALUED_FIELD_NAME)))\n+ .get();\n+ fail();\n+ } catch (ElasticsearchException ex) {\n+ Throwable rootCause = ex.getRootCause();\n+ assertThat(rootCause, instanceOf(IllegalArgumentException.class));\n+ assertThat(rootCause.getMessage(), containsString(\"Invalid order path\"));\n+ }\n+ }\n+\n public void testSingleValuedFieldOrderedByMultiValuedSubAggregationDesc() throws Exception {\n SearchResponse response = client().prepareSearch(\"idx\")\n .addAggregation(", "filename": "core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java", "status": "modified" } ] }
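The "one bucket" hack in the diffs above works because sorting a single-element list never invokes the comparator, so any validation performed inside the comparator is silently skipped; calling `compare(b, b)` once forces that validation to run. A small JDK-only demonstration of that effect follows — the validating comparator here is illustrative, not the aggregation order comparator itself.

```
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public final class SingleBucketSortExample {
    public static void main(String[] args) {
        // A comparator that "validates" by throwing, mimicking an invalid order path check.
        Comparator<String> validating = (a, b) -> {
            throw new IllegalArgumentException("Invalid order path [wrong]");
        };

        List<String> oneBucket = new ArrayList<>(List.of("bucket-1"));
        oneBucket.sort(validating); // no exception: a one-element list is never compared
        System.out.println("sorting a single bucket skipped the comparator");

        // The workaround from the PR: invoke the comparator explicitly once to force the check.
        try {
            validating.compare(oneBucket.get(0), oneBucket.get(0));
        } catch (IllegalArgumentException e) {
            System.out.println("forced check threw: " + e.getMessage());
        }
    }
}
```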
{ "body": "As part of our improved CORS handling (https://github.com/elastic/elasticsearch/pull/16092), we serialize the `Access-Control-Allow-Headers` and `Access-Control-Allow-Methods` response headers using Netty, which, if given a set of values, produces a response header that looks like:\n\n```\nAccess-Control-Allow-Headers: Content-Type\nAccess-Control-Allow-Headers: X-Requested-With\n```\n\ninstead of a comma-separated list in a single header value \n`Access-Control-Allow-Headers: Content-Type,X-Requested-With`\n\nSeparating each header value out individually _should_ be fine according to the RFC: http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2, although the RFC does leave some ambiguity if such a separation does need to be supported.\n\nIt turns out Chrome is fine with this but IE is not.\n", "comments": [ { "body": "Closed by 4566378cd58415f84bde2252b6b95f6b24e96dff (2.4) and 0fd150d5d9743250b27062c391ff77d8b726c51a (2.3.6)\n", "created_at": "2016-08-10T20:31:31Z" } ], "number": 19841, "title": "CORS multi-value response headers don't work in IE" }
{ "body": "Ensures that CORS preflight requests return multi-value\nAccess-Control-Allow-Headers and Access-Control-Allow-Methods\nresponse headers as single headers with comma separated values,\nwhich is closest to the RFC specification (see http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2) and supports browsers\nlike IE which do not handle separate response header lines for\neach value.\n\nThis PR also refactors some of the CORS code to make it easier\nto test.\n\nNote that the Netty4 module required no updates to the CORS handling\nbecause while Netty3 creates a new header (with the same field name) for\neach value in a collection, Netty4 is more in line with the RFC and creates\na single response header with a comma-separated value.\n\nCloses #19841\n", "number": 19847, "review_comments": [], "title": "Single comma-delimited response header for multiple values" }
{ "commits": [ { "message": "Ensures that CORS preflight requests return multi-value\nAccess-Control-Allow-Headers and Access-Control-Allow-Methods\nresponse headers as single headers with comma separated values,\nwhich is closest to the RFC specification and supports browsers\nlike IE which do not handle separate response header lines for\neach value.\n\nCloses #19841" } ], "files": [ { "diff": "@@ -45,13 +45,11 @@\n import org.elasticsearch.http.HttpServerTransport;\n import org.elasticsearch.http.HttpStats;\n import org.elasticsearch.http.netty3.cors.Netty3CorsConfig;\n-import org.elasticsearch.http.netty3.cors.Netty3CorsConfigBuilder;\n import org.elasticsearch.http.netty3.cors.Netty3CorsHandler;\n import org.elasticsearch.http.netty3.pipelining.HttpPipeliningHandler;\n import org.elasticsearch.monitor.jvm.JvmInfo;\n import org.elasticsearch.rest.RestChannel;\n import org.elasticsearch.rest.RestRequest;\n-import org.elasticsearch.rest.RestUtils;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.BindTransportException;\n import org.elasticsearch.transport.netty3.Netty3OpenChannelsHandler;\n@@ -71,7 +69,6 @@\n import org.jboss.netty.handler.codec.http.HttpChunkAggregator;\n import org.jboss.netty.handler.codec.http.HttpContentCompressor;\n import org.jboss.netty.handler.codec.http.HttpContentDecompressor;\n-import org.jboss.netty.handler.codec.http.HttpMethod;\n import org.jboss.netty.handler.codec.http.HttpRequestDecoder;\n import org.jboss.netty.handler.timeout.ReadTimeoutException;\n \n@@ -81,21 +78,13 @@\n import java.util.ArrayList;\n import java.util.Arrays;\n import java.util.List;\n-import java.util.Set;\n import java.util.concurrent.Executors;\n import java.util.concurrent.atomic.AtomicReference;\n-import java.util.regex.Pattern;\n-import java.util.stream.Collectors;\n \n import static org.elasticsearch.common.settings.Setting.boolSetting;\n import static org.elasticsearch.common.settings.Setting.byteSizeSetting;\n import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;\n-import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS;\n-import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS;\n-import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS;\n-import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN;\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED;\n-import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE;\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_BIND_HOST;\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION;\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL;\n@@ -110,7 +99,6 @@\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES;\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING;\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS;\n-import static org.elasticsearch.http.netty3.cors.Netty3CorsHandler.ANY_ORIGIN;\n \n public class Netty3HttpServerTransport extends AbstractLifecycleComponent implements HttpServerTransport {\n \n@@ -257,7 +245,7 @@ public Netty3HttpServerTransport(Settings settings, NetworkService networkServic\n this.compressionLevel = SETTING_HTTP_COMPRESSION_LEVEL.get(settings);\n this.pipelining = 
SETTING_PIPELINING.get(settings);\n this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings);\n- this.corsConfig = buildCorsConfig(settings);\n+ this.corsConfig = Netty3CorsConfig.buildCorsConfig(settings);\n \n // validate max content length\n if (maxContentLength.bytes() > Integer.MAX_VALUE) {\n@@ -371,35 +359,6 @@ static int resolvePublishPort(Settings settings, List<InetSocketTransportAddress\n return publishPort;\n }\n \n- private Netty3CorsConfig buildCorsConfig(Settings settings) {\n- if (SETTING_CORS_ENABLED.get(settings) == false) {\n- return Netty3CorsConfigBuilder.forOrigins().disable().build();\n- }\n- String origin = SETTING_CORS_ALLOW_ORIGIN.get(settings);\n- final Netty3CorsConfigBuilder builder;\n- if (Strings.isNullOrEmpty(origin)) {\n- builder = Netty3CorsConfigBuilder.forOrigins();\n- } else if (origin.equals(ANY_ORIGIN)) {\n- builder = Netty3CorsConfigBuilder.forAnyOrigin();\n- } else {\n- Pattern p = RestUtils.checkCorsSettingForRegex(origin);\n- if (p == null) {\n- builder = Netty3CorsConfigBuilder.forOrigins(RestUtils.corsSettingAsArray(origin));\n- } else {\n- builder = Netty3CorsConfigBuilder.forPattern(p);\n- }\n- }\n- if (SETTING_CORS_ALLOW_CREDENTIALS.get(settings)) {\n- builder.allowCredentials();\n- }\n- Set<String> strMethods = Strings.splitStringByCommaToSet(SETTING_CORS_ALLOW_METHODS.get(settings));\n- return builder.allowedRequestMethods(strMethods.stream().map(HttpMethod::valueOf).collect(Collectors.toSet()))\n- .maxAge(SETTING_CORS_MAX_AGE.get(settings))\n- .allowedRequestHeaders(Strings.splitStringByCommaToSet(SETTING_CORS_ALLOW_HEADERS.get(settings)))\n- .shortCircuit()\n- .build();\n- }\n-\n private InetSocketTransportAddress bindAddress(final InetAddress hostAddress) {\n final AtomicReference<Exception> lastException = new AtomicReference<>();\n final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>();", "filename": "modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java", "status": "modified" }, { "diff": "@@ -19,6 +19,9 @@\n \n package org.elasticsearch.http.netty3.cors;\n \n+import org.elasticsearch.common.Strings;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.rest.RestUtils;\n import org.jboss.netty.handler.codec.http.DefaultHttpHeaders;\n import org.jboss.netty.handler.codec.http.HttpHeaders;\n import org.jboss.netty.handler.codec.http.HttpMethod;\n@@ -30,6 +33,15 @@\n import java.util.Set;\n import java.util.concurrent.Callable;\n import java.util.regex.Pattern;\n+import java.util.stream.Collectors;\n+\n+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS;\n+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS;\n+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS;\n+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN;\n+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED;\n+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE;\n+import static org.elasticsearch.http.netty3.cors.Netty3CorsHandler.ANY_ORIGIN;\n \n /**\n * Configuration for Cross-Origin Resource Sharing (CORS).\n@@ -219,6 +231,38 @@ private static <T> T getValue(final Callable<T> callable) {\n }\n }\n \n+ /**\n+ * Build a {@link Netty3CorsConfig} from the given settings.\n+ */\n+ public static Netty3CorsConfig buildCorsConfig(Settings settings) {\n+ if 
(SETTING_CORS_ENABLED.get(settings) == false) {\n+ return Netty3CorsConfigBuilder.forOrigins().disable().build();\n+ }\n+ String origin = SETTING_CORS_ALLOW_ORIGIN.get(settings);\n+ final Netty3CorsConfigBuilder builder;\n+ if (Strings.isNullOrEmpty(origin)) {\n+ builder = Netty3CorsConfigBuilder.forOrigins();\n+ } else if (origin.equals(ANY_ORIGIN)) {\n+ builder = Netty3CorsConfigBuilder.forAnyOrigin();\n+ } else {\n+ Pattern p = RestUtils.checkCorsSettingForRegex(origin);\n+ if (p == null) {\n+ builder = Netty3CorsConfigBuilder.forOrigins(RestUtils.corsSettingAsArray(origin));\n+ } else {\n+ builder = Netty3CorsConfigBuilder.forPattern(p);\n+ }\n+ }\n+ if (SETTING_CORS_ALLOW_CREDENTIALS.get(settings)) {\n+ builder.allowCredentials();\n+ }\n+ Set<String> strMethods = Strings.splitStringByCommaToSet(SETTING_CORS_ALLOW_METHODS.get(settings));\n+ return builder.allowedRequestMethods(strMethods.stream().map(HttpMethod::valueOf).collect(Collectors.toSet()))\n+ .maxAge(SETTING_CORS_MAX_AGE.get(settings))\n+ .allowedRequestHeaders(Strings.splitStringByCommaToSet(SETTING_CORS_ALLOW_HEADERS.get(settings)))\n+ .shortCircuit()\n+ .build();\n+ }\n+\n @Override\n public String toString() {\n return \"CorsConfig[enabled=\" + enabled +", "filename": "modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/cors/Netty3CorsConfig.java", "status": "modified" }, { "diff": "@@ -58,7 +58,6 @@ public class Netty3CorsHandler extends SimpleChannelUpstreamHandler {\n private static Pattern SCHEME_PATTERN = Pattern.compile(\"^https?://\");\n \n private final Netty3CorsConfig config;\n- private HttpRequest request;\n \n /**\n * Creates a new instance with the specified {@link Netty3CorsConfig}.\n@@ -73,12 +72,12 @@ public Netty3CorsHandler(final Netty3CorsConfig config) {\n @Override\n public void messageReceived(final ChannelHandlerContext ctx, final MessageEvent e) throws Exception {\n if (config.isCorsSupportEnabled() && e.getMessage() instanceof HttpRequest) {\n- request = (HttpRequest) e.getMessage();\n+ final HttpRequest request = (HttpRequest) e.getMessage();\n if (isPreflightRequest(request)) {\n handlePreflight(ctx, request);\n return;\n }\n- if (config.isShortCircuit() && !validateOrigin()) {\n+ if (config.isShortCircuit() && !validateOrigin(request)) {\n forbidden(ctx, request);\n return;\n }\n@@ -110,16 +109,26 @@ public static void setCorsResponseHeaders(HttpRequest request, HttpResponse resp\n }\n \n private void handlePreflight(final ChannelHandlerContext ctx, final HttpRequest request) {\n+ final HttpResponse response = handlePreflight(request);\n+ if (response != null) {\n+ ctx.getChannel().write(response).addListener(ChannelFutureListener.CLOSE);\n+ } else {\n+ forbidden(ctx, request);\n+ }\n+ }\n+\n+ // package private for testing\n+ HttpResponse handlePreflight(final HttpRequest request) {\n final HttpResponse response = new DefaultHttpResponse(request.getProtocolVersion(), OK);\n- if (setOrigin(response)) {\n+ if (setOrigin(request, response)) {\n setAllowMethods(response);\n setAllowHeaders(response);\n setAllowCredentials(response);\n setMaxAge(response);\n setPreflightHeaders(response);\n- ctx.getChannel().write(response).addListener(ChannelFutureListener.CLOSE);\n+ return response;\n } else {\n- forbidden(ctx, request);\n+ return null;\n }\n }\n \n@@ -149,7 +158,7 @@ private void setPreflightHeaders(final HttpResponse response) {\n response.headers().add(config.preflightResponseHeaders());\n }\n \n- private boolean setOrigin(final HttpResponse response) {\n+ private boolean 
setOrigin(final HttpRequest request, final HttpResponse response) {\n final String origin = request.headers().get(ORIGIN);\n if (!Strings.isNullOrEmpty(origin)) {\n if (\"null\".equals(origin) && config.isNullOriginAllowed()) {\n@@ -158,7 +167,7 @@ private boolean setOrigin(final HttpResponse response) {\n }\n if (config.isAnyOriginSupported()) {\n if (config.isCredentialsAllowed()) {\n- echoRequestOrigin(response);\n+ echoRequestOrigin(request, response);\n setVaryHeader(response);\n } else {\n setAnyOrigin(response);\n@@ -174,7 +183,7 @@ private boolean setOrigin(final HttpResponse response) {\n return false;\n }\n \n- private boolean validateOrigin() {\n+ private boolean validateOrigin(final HttpRequest request) {\n if (config.isAnyOriginSupported()) {\n return true;\n }\n@@ -197,7 +206,7 @@ private boolean validateOrigin() {\n return config.isOriginAllowed(origin);\n }\n \n- private void echoRequestOrigin(final HttpResponse response) {\n+ private static void echoRequestOrigin(final HttpRequest request, final HttpResponse response) {\n setOrigin(response, request.headers().get(ORIGIN));\n }\n \n@@ -228,13 +237,13 @@ private static boolean isPreflightRequest(final HttpRequest request) {\n }\n \n private void setAllowMethods(final HttpResponse response) {\n- response.headers().set(ACCESS_CONTROL_ALLOW_METHODS, config.allowedRequestMethods().stream()\n- .map(m -> m.getName().trim())\n- .collect(Collectors.toList()));\n+ response.headers().set(ACCESS_CONTROL_ALLOW_METHODS, Strings.collectionToCommaDelimitedString(\n+ config.allowedRequestMethods().stream().map(m -> m.getName().trim()).collect(Collectors.toList()))\n+ );\n }\n \n private void setAllowHeaders(final HttpResponse response) {\n- response.headers().set(ACCESS_CONTROL_ALLOW_HEADERS, config.allowedRequestHeaders());\n+ response.headers().set(ACCESS_CONTROL_ALLOW_HEADERS, Strings.collectionToCommaDelimitedString(config.allowedRequestHeaders()));\n }\n \n private void setMaxAge(final HttpResponse response) {", "filename": "modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/cors/Netty3CorsHandler.java", "status": "modified" }, { "diff": "@@ -370,7 +370,7 @@ public List<Object> getWrittenObjects() {\n }\n }\n \n- private static class TestHttpRequest implements HttpRequest {\n+ public static class TestHttpRequest implements HttpRequest {\n \n private HttpHeaders headers = new DefaultHttpHeaders();\n ", "filename": "modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpChannelTests.java", "status": "modified" }, { "diff": "@@ -0,0 +1,94 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.http.netty3.cors;\n+\n+import org.elasticsearch.common.Strings;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.http.netty3.Netty3HttpChannelTests;\n+import org.elasticsearch.test.ESTestCase;\n+import org.jboss.netty.handler.codec.http.HttpHeaders;\n+import org.jboss.netty.handler.codec.http.HttpMethod;\n+import org.jboss.netty.handler.codec.http.HttpRequest;\n+import org.jboss.netty.handler.codec.http.HttpResponse;\n+\n+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS;\n+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS;\n+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN;\n+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED;\n+\n+/**\n+ * Tests for {@link Netty3CorsHandler}\n+ */\n+public class Netty3CorsHandlerTests extends ESTestCase {\n+\n+ public void testPreflightMultiValueResponseHeaders() {\n+ // test when only one value\n+ String headersRequestHeader = \"content-type\";\n+ String methodsRequestHeader = \"GET\";\n+ Settings settings = Settings.builder()\n+ .put(SETTING_CORS_ENABLED.getKey(), true)\n+ .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), Netty3CorsHandler.ANY_ORIGIN)\n+ .put(SETTING_CORS_ALLOW_HEADERS.getKey(), headersRequestHeader)\n+ .put(SETTING_CORS_ALLOW_METHODS.getKey(), methodsRequestHeader)\n+ .build();\n+ HttpResponse response = execPreflight(settings, Netty3CorsHandler.ANY_ORIGIN, \"request-host\");\n+ assertEquals(headersRequestHeader, response.headers().get(HttpHeaders.Names.ACCESS_CONTROL_ALLOW_HEADERS));\n+ assertEquals(methodsRequestHeader, response.headers().get(HttpHeaders.Names.ACCESS_CONTROL_ALLOW_METHODS));\n+\n+ // test with a set of values\n+ headersRequestHeader = \"content-type,x-requested-with,accept\";\n+ methodsRequestHeader = \"GET,POST\";\n+ settings = Settings.builder()\n+ .put(SETTING_CORS_ENABLED.getKey(), true)\n+ .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), Netty3CorsHandler.ANY_ORIGIN)\n+ .put(SETTING_CORS_ALLOW_HEADERS.getKey(), headersRequestHeader)\n+ .put(SETTING_CORS_ALLOW_METHODS.getKey(), methodsRequestHeader)\n+ .build();\n+ response = execPreflight(settings, Netty3CorsHandler.ANY_ORIGIN, \"request-host\");\n+ assertEquals(Strings.commaDelimitedListToSet(headersRequestHeader),\n+ Strings.commaDelimitedListToSet(response.headers().get(HttpHeaders.Names.ACCESS_CONTROL_ALLOW_HEADERS)));\n+ assertEquals(Strings.commaDelimitedListToSet(methodsRequestHeader),\n+ Strings.commaDelimitedListToSet(response.headers().get(HttpHeaders.Names.ACCESS_CONTROL_ALLOW_METHODS)));\n+\n+ // test with defaults\n+ settings = Settings.builder()\n+ .put(SETTING_CORS_ENABLED.getKey(), true)\n+ .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), Netty3CorsHandler.ANY_ORIGIN)\n+ .build();\n+ response = execPreflight(settings, Netty3CorsHandler.ANY_ORIGIN, \"request-host\");\n+ assertEquals(Strings.commaDelimitedListToSet(SETTING_CORS_ALLOW_HEADERS.getDefault(settings)),\n+ Strings.commaDelimitedListToSet(response.headers().get(HttpHeaders.Names.ACCESS_CONTROL_ALLOW_HEADERS)));\n+ assertEquals(Strings.commaDelimitedListToSet(SETTING_CORS_ALLOW_METHODS.getDefault(settings)),\n+ Strings.commaDelimitedListToSet(response.headers().get(HttpHeaders.Names.ACCESS_CONTROL_ALLOW_METHODS)));\n+ }\n+\n+ private HttpResponse execPreflight(final Settings settings, final String 
originValue, final String host) {\n+ // simulate execution of a preflight request\n+ HttpRequest httpRequest = new Netty3HttpChannelTests.TestHttpRequest();\n+ httpRequest.setMethod(HttpMethod.OPTIONS);\n+ httpRequest.headers().add(HttpHeaders.Names.ORIGIN, originValue);\n+ httpRequest.headers().add(HttpHeaders.Names.HOST, host);\n+ httpRequest.headers().add(HttpHeaders.Names.ACCESS_CONTROL_REQUEST_METHOD, \"GET\");\n+\n+ Netty3CorsHandler corsHandler = new Netty3CorsHandler(Netty3CorsConfig.buildCorsConfig(settings));\n+ return corsHandler.handlePreflight(httpRequest);\n+ }\n+\n+}", "filename": "modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/cors/Netty3CorsHandlerTests.java", "status": "added" }, { "diff": "@@ -33,13 +33,11 @@\n import io.netty.channel.RecvByteBufAllocator;\n import io.netty.channel.nio.NioEventLoopGroup;\n import io.netty.channel.oio.OioEventLoopGroup;\n-import io.netty.channel.socket.SocketChannel;\n import io.netty.channel.socket.nio.NioServerSocketChannel;\n import io.netty.channel.socket.oio.OioServerSocketChannel;\n import io.netty.handler.codec.ByteToMessageDecoder;\n import io.netty.handler.codec.http.HttpContentCompressor;\n import io.netty.handler.codec.http.HttpContentDecompressor;\n-import io.netty.handler.codec.http.HttpMethod;\n import io.netty.handler.codec.http.HttpObjectAggregator;\n import io.netty.handler.codec.http.HttpRequestDecoder;\n import io.netty.handler.codec.http.HttpResponseEncoder;\n@@ -68,13 +66,11 @@\n import org.elasticsearch.http.HttpServerTransport;\n import org.elasticsearch.http.HttpStats;\n import org.elasticsearch.http.netty4.cors.Netty4CorsConfig;\n-import org.elasticsearch.http.netty4.cors.Netty4CorsConfigBuilder;\n import org.elasticsearch.http.netty4.cors.Netty4CorsHandler;\n import org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler;\n import org.elasticsearch.monitor.jvm.JvmInfo;\n import org.elasticsearch.rest.RestChannel;\n import org.elasticsearch.rest.RestRequest;\n-import org.elasticsearch.rest.RestUtils;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.BindTransportException;\n import org.elasticsearch.transport.netty4.Netty4OpenChannelsHandler;\n@@ -88,17 +84,11 @@\n import java.util.List;\n import java.util.concurrent.TimeUnit;\n import java.util.concurrent.atomic.AtomicReference;\n-import java.util.regex.Pattern;\n \n import static org.elasticsearch.common.settings.Setting.boolSetting;\n import static org.elasticsearch.common.settings.Setting.byteSizeSetting;\n import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;\n-import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS;\n-import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS;\n-import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS;\n-import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN;\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED;\n-import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE;\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_BIND_HOST;\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION;\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL;\n@@ -113,7 +103,6 @@\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES;\n import static 
org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING;\n import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS;\n-import static org.elasticsearch.http.netty4.cors.Netty4CorsHandler.ANY_ORIGIN;\n \n public class Netty4HttpServerTransport extends AbstractLifecycleComponent implements HttpServerTransport {\n \n@@ -261,7 +250,7 @@ public Netty4HttpServerTransport(Settings settings, NetworkService networkServic\n this.compressionLevel = SETTING_HTTP_COMPRESSION_LEVEL.get(settings);\n this.pipelining = SETTING_PIPELINING.get(settings);\n this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings);\n- this.corsConfig = buildCorsConfig(settings);\n+ this.corsConfig = Netty4CorsConfig.buildCorsConfig(settings);\n \n // validate max content length\n if (maxContentLength.bytes() > Integer.MAX_VALUE) {\n@@ -382,39 +371,6 @@ static int resolvePublishPort(Settings settings, List<InetSocketTransportAddress\n return publishPort;\n }\n \n- private Netty4CorsConfig buildCorsConfig(Settings settings) {\n- if (SETTING_CORS_ENABLED.get(settings) == false) {\n- return Netty4CorsConfigBuilder.forOrigins().disable().build();\n- }\n- String origin = SETTING_CORS_ALLOW_ORIGIN.get(settings);\n- final Netty4CorsConfigBuilder builder;\n- if (Strings.isNullOrEmpty(origin)) {\n- builder = Netty4CorsConfigBuilder.forOrigins();\n- } else if (origin.equals(ANY_ORIGIN)) {\n- builder = Netty4CorsConfigBuilder.forAnyOrigin();\n- } else {\n- Pattern p = RestUtils.checkCorsSettingForRegex(origin);\n- if (p == null) {\n- builder = Netty4CorsConfigBuilder.forOrigins(RestUtils.corsSettingAsArray(origin));\n- } else {\n- builder = Netty4CorsConfigBuilder.forPattern(p);\n- }\n- }\n- if (SETTING_CORS_ALLOW_CREDENTIALS.get(settings)) {\n- builder.allowCredentials();\n- }\n- String[] strMethods = settings.getAsArray(SETTING_CORS_ALLOW_METHODS.getKey());\n- HttpMethod[] methods = Arrays.asList(strMethods)\n- .stream()\n- .map(HttpMethod::valueOf)\n- .toArray(size -> new HttpMethod[size]);\n- return builder.allowedRequestMethods(methods)\n- .maxAge(SETTING_CORS_MAX_AGE.get(settings))\n- .allowedRequestHeaders(settings.getAsArray(SETTING_CORS_ALLOW_HEADERS.getKey()))\n- .shortCircuit()\n- .build();\n- }\n-\n private InetSocketTransportAddress bindAddress(final InetAddress hostAddress) {\n final AtomicReference<Exception> lastException = new AtomicReference<>();\n final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>();", "filename": "modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java", "status": "modified" }, { "diff": "@@ -23,7 +23,11 @@\n import io.netty.handler.codec.http.EmptyHttpHeaders;\n import io.netty.handler.codec.http.HttpHeaders;\n import io.netty.handler.codec.http.HttpMethod;\n+import org.elasticsearch.common.Strings;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.rest.RestUtils;\n \n+import java.util.Arrays;\n import java.util.Collections;\n import java.util.LinkedHashSet;\n import java.util.Map;\n@@ -32,6 +36,14 @@\n import java.util.concurrent.Callable;\n import java.util.regex.Pattern;\n \n+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS;\n+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS;\n+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS;\n+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN;\n+import static 
org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED;\n+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE;\n+import static org.elasticsearch.http.netty4.cors.Netty4CorsHandler.ANY_ORIGIN;\n+\n /**\n * Configuration for Cross-Origin Resource Sharing (CORS).\n *\n@@ -232,4 +244,40 @@ public String toString() {\n \", preflightHeaders=\" + preflightHeaders + ']';\n }\n \n+ /**\n+ * Constructs a {@link Netty4CorsConfig} from the given settings.\n+ */\n+ public static Netty4CorsConfig buildCorsConfig(Settings settings) {\n+ if (SETTING_CORS_ENABLED.get(settings) == false) {\n+ return Netty4CorsConfigBuilder.forOrigins().disable().build();\n+ }\n+ String origin = SETTING_CORS_ALLOW_ORIGIN.get(settings);\n+ final Netty4CorsConfigBuilder builder;\n+ if (Strings.isNullOrEmpty(origin)) {\n+ builder = Netty4CorsConfigBuilder.forOrigins();\n+ } else if (origin.equals(ANY_ORIGIN)) {\n+ builder = Netty4CorsConfigBuilder.forAnyOrigin();\n+ } else {\n+ Pattern p = RestUtils.checkCorsSettingForRegex(origin);\n+ if (p == null) {\n+ builder = Netty4CorsConfigBuilder.forOrigins(RestUtils.corsSettingAsArray(origin));\n+ } else {\n+ builder = Netty4CorsConfigBuilder.forPattern(p);\n+ }\n+ }\n+ if (SETTING_CORS_ALLOW_CREDENTIALS.get(settings)) {\n+ builder.allowCredentials();\n+ }\n+ String[] strMethods = Strings.splitStringByCommaToArray(SETTING_CORS_ALLOW_METHODS.get(settings));\n+ HttpMethod[] methods = Arrays.asList(strMethods)\n+ .stream()\n+ .map(HttpMethod::valueOf)\n+ .toArray(size -> new HttpMethod[size]);\n+ return builder.allowedRequestMethods(methods)\n+ .maxAge(SETTING_CORS_MAX_AGE.get(settings))\n+ .allowedRequestHeaders(Strings.splitStringByCommaToArray(SETTING_CORS_ALLOW_HEADERS.get(settings)))\n+ .shortCircuit()\n+ .build();\n+ }\n+\n }", "filename": "modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsConfig.java", "status": "modified" }, { "diff": "@@ -48,7 +48,6 @@ public class Netty4CorsHandler extends ChannelDuplexHandler {\n private static Pattern SCHEME_PATTERN = Pattern.compile(\"^https?://\");\n \n private final Netty4CorsConfig config;\n- private HttpRequest request;\n \n /**\n * Creates a new instance with the specified {@link Netty4CorsConfig}.\n@@ -63,12 +62,12 @@ public Netty4CorsHandler(final Netty4CorsConfig config) {\n @Override\n public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {\n if (config.isCorsSupportEnabled() && msg instanceof HttpRequest) {\n- request = (HttpRequest) msg;\n+ final HttpRequest request = (HttpRequest) msg;\n if (isPreflightRequest(request)) {\n handlePreflight(ctx, request);\n return;\n }\n- if (config.isShortCircuit() && !validateOrigin()) {\n+ if (config.isShortCircuit() && !validateOrigin(request)) {\n forbidden(ctx, request);\n return;\n }\n@@ -100,16 +99,26 @@ public static void setCorsResponseHeaders(HttpRequest request, HttpResponse resp\n }\n \n private void handlePreflight(final ChannelHandlerContext ctx, final HttpRequest request) {\n+ final HttpResponse response = handlePreflight(request);\n+ if (response != null) {\n+ ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);\n+ } else {\n+ forbidden(ctx, request);\n+ }\n+ }\n+\n+ // package private for testing\n+ HttpResponse handlePreflight(final HttpRequest request) {\n final HttpResponse response = new DefaultFullHttpResponse(request.protocolVersion(), HttpResponseStatus.OK, true, true);\n- if (setOrigin(response)) {\n+ if (setOrigin(request, response)) {\n 
setAllowMethods(response);\n setAllowHeaders(response);\n setAllowCredentials(response);\n setMaxAge(response);\n setPreflightHeaders(response);\n- ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);\n+ return response;\n } else {\n- forbidden(ctx, request);\n+ return null;\n }\n }\n \n@@ -139,7 +148,7 @@ private void setPreflightHeaders(final HttpResponse response) {\n response.headers().add(config.preflightResponseHeaders());\n }\n \n- private boolean setOrigin(final HttpResponse response) {\n+ private boolean setOrigin(final HttpRequest request, final HttpResponse response) {\n final String origin = request.headers().get(HttpHeaderNames.ORIGIN);\n if (!Strings.isNullOrEmpty(origin)) {\n if (\"null\".equals(origin) && config.isNullOriginAllowed()) {\n@@ -149,7 +158,7 @@ private boolean setOrigin(final HttpResponse response) {\n \n if (config.isAnyOriginSupported()) {\n if (config.isCredentialsAllowed()) {\n- echoRequestOrigin(response);\n+ echoRequestOrigin(request, response);\n setVaryHeader(response);\n } else {\n setAnyOrigin(response);\n@@ -165,7 +174,7 @@ private boolean setOrigin(final HttpResponse response) {\n return false;\n }\n \n- private boolean validateOrigin() {\n+ private boolean validateOrigin(final HttpRequest request) {\n if (config.isAnyOriginSupported()) {\n return true;\n }\n@@ -188,7 +197,7 @@ private boolean validateOrigin() {\n return config.isOriginAllowed(origin);\n }\n \n- private void echoRequestOrigin(final HttpResponse response) {\n+ private static void echoRequestOrigin(final HttpRequest request, final HttpResponse response) {\n setOrigin(response, request.headers().get(HttpHeaderNames.ORIGIN));\n }\n \n@@ -219,9 +228,8 @@ private static boolean isPreflightRequest(final HttpRequest request) {\n }\n \n private void setAllowMethods(final HttpResponse response) {\n- response.headers().set(HttpHeaderNames.ACCESS_CONTROL_ALLOW_METHODS, config.allowedRequestMethods().stream()\n- .map(m -> m.name().trim())\n- .collect(Collectors.toList()));\n+ response.headers().set(HttpHeaderNames.ACCESS_CONTROL_ALLOW_METHODS,\n+ config.allowedRequestMethods().stream().map(m -> m.name().trim()).collect(Collectors.toList()));\n }\n \n private void setAllowHeaders(final HttpResponse response) {", "filename": "modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsHandler.java", "status": "modified" }, { "diff": "@@ -0,0 +1,94 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.http.netty4.cors;\n+\n+import io.netty.handler.codec.http.DefaultFullHttpRequest;\n+import io.netty.handler.codec.http.FullHttpRequest;\n+import io.netty.handler.codec.http.HttpHeaderNames;\n+import io.netty.handler.codec.http.HttpMethod;\n+import io.netty.handler.codec.http.HttpResponse;\n+import io.netty.handler.codec.http.HttpVersion;\n+import org.elasticsearch.common.Strings;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.test.ESTestCase;\n+\n+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS;\n+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS;\n+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN;\n+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED;\n+\n+/**\n+ * Tests for {@link Netty4CorsHandler}\n+ */\n+public class Netty4CorsHandlerTests extends ESTestCase {\n+\n+ public void testPreflightMultiValueResponseHeaders() {\n+ // test when only one value\n+ String headersRequestHeader = \"content-type\";\n+ String methodsRequestHeader = \"GET\";\n+ Settings settings = Settings.builder()\n+ .put(SETTING_CORS_ENABLED.getKey(), true)\n+ .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), Netty4CorsHandler.ANY_ORIGIN)\n+ .put(SETTING_CORS_ALLOW_HEADERS.getKey(), headersRequestHeader)\n+ .put(SETTING_CORS_ALLOW_METHODS.getKey(), methodsRequestHeader)\n+ .build();\n+ HttpResponse response = execPreflight(settings, Netty4CorsHandler.ANY_ORIGIN, \"request-host\");\n+ assertEquals(headersRequestHeader, response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_HEADERS));\n+ assertEquals(methodsRequestHeader, response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_METHODS));\n+\n+ // test with a set of values\n+ headersRequestHeader = \"content-type,x-requested-with,accept\";\n+ methodsRequestHeader = \"GET,POST\";\n+ settings = Settings.builder()\n+ .put(SETTING_CORS_ENABLED.getKey(), true)\n+ .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), Netty4CorsHandler.ANY_ORIGIN)\n+ .put(SETTING_CORS_ALLOW_HEADERS.getKey(), headersRequestHeader)\n+ .put(SETTING_CORS_ALLOW_METHODS.getKey(), methodsRequestHeader)\n+ .build();\n+ response = execPreflight(settings, Netty4CorsHandler.ANY_ORIGIN, \"request-host\");\n+ assertEquals(Strings.commaDelimitedListToSet(headersRequestHeader),\n+ Strings.commaDelimitedListToSet(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_HEADERS)));\n+ assertEquals(Strings.commaDelimitedListToSet(methodsRequestHeader),\n+ Strings.commaDelimitedListToSet(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_METHODS)));\n+\n+ // test with defaults\n+ settings = Settings.builder()\n+ .put(SETTING_CORS_ENABLED.getKey(), true)\n+ .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), Netty4CorsHandler.ANY_ORIGIN)\n+ .build();\n+ response = execPreflight(settings, Netty4CorsHandler.ANY_ORIGIN, \"request-host\");\n+ assertEquals(Strings.commaDelimitedListToSet(SETTING_CORS_ALLOW_HEADERS.getDefault(settings)),\n+ Strings.commaDelimitedListToSet(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_HEADERS)));\n+ assertEquals(Strings.commaDelimitedListToSet(SETTING_CORS_ALLOW_METHODS.getDefault(settings)),\n+ Strings.commaDelimitedListToSet(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_METHODS)));\n+ }\n+\n+ private HttpResponse execPreflight(final Settings settings, 
final String originValue, final String host) {\n+ // simulate execution of a preflight request\n+ final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, \"/\");\n+ httpRequest.setMethod(HttpMethod.OPTIONS);\n+ httpRequest.headers().add(HttpHeaderNames.ORIGIN, originValue);\n+ httpRequest.headers().add(HttpHeaderNames.HOST, host);\n+ httpRequest.headers().add(HttpHeaderNames.ACCESS_CONTROL_REQUEST_METHOD, \"GET\");\n+\n+ Netty4CorsHandler corsHandler = new Netty4CorsHandler(Netty4CorsConfig.buildCorsConfig(settings));\n+ return corsHandler.handlePreflight(httpRequest);\n+ }\n+}", "filename": "modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/cors/Netty4CorsHandlerTests.java", "status": "added" } ] }
{ "body": "**Elasticsearch version**: 2.3.4\n\n**JVM version**: 1.8\n\n**OS version**: OSX 10.11\n\n**Description of the problem including expected versus actual behavior**:\n\nThere is a possibility to provide multiple fields to queries like so:\n\n``` json\n{\n \"query\": {\n \"range\": {\n \"age\": {\n \"gte\": 30,\n \"lte\": 40\n },\n \"price\": {\n \"gte\": 10,\n \"lte\": 30\n }\n }\n }\n}\n```\n\nHere I provide two fields to a single query and elasticsearch provides legitimate results for such a query. However, the results returned always only include hits from the last field request, that is, if I were to execute the query given above, I would only get results from the `price` field. Moreover I could not find any mentions of such behaviour in the official docs. \nAnalogous behaviour was observed with `prefix`, `regexp`, `geo_distance` queries and more.\nLogically this should either provide the hits for both provided fields or not work at all.\n", "comments": [ { "body": "The range query supports one field, like most of the elasticsearch queries. If you need to execute two range queries you need to combine them using a bool query and two must clauses that contain one range query each.\n\nI don't see the range query supporting multiple fields in the future. What we could do though is throw an error instead of accepting such a query, which is effectively malformed. Otherwise we make users think that we support this syntax although we don't. I am a bit on the fence about this though cause that would mean adding this \"is field already set\" type of check potentially in a lot of places. The \"keep the last one\" behaviour comes with the way we pull parse json and this same situation can happen in many other places in our codebase.\n", "created_at": "2016-07-22T10:11:08Z" }, { "body": "Maybe there are things to do to make it less likely to have this error in the future (we already had a lot of similar bugs reported, and there will probably be other ones), but I think it is important to fail on bad inputs.\n", "created_at": "2016-07-22T21:59:40Z" }, { "body": "This query doesn't throw an exception in 5.0, and it should.\n", "created_at": "2016-07-27T15:26:02Z" }, { "body": "> This query doesn't throw an exception in 5.0, and it should.\n\nYes it should. But we have this problem in each single query of our DSL. We can fix it here and forget about all the other places where we have this problem, or maybe see what we can do to fix it in more places (preferably everywhere). I have this specific fix in one of my branches. I just felt bad about fixing this single case and wondered if we could do something better other than going through each single query one by one. That is why I had marked discuss, sorry for the confusion.\n", "created_at": "2016-07-27T15:32:46Z" }, { "body": "+1 for a more comprehensive fix\n", "created_at": "2016-07-27T15:34:24Z" } ], "number": 19547, "title": "Multiple fields provided to queries return results for the last one only" }
{ "body": "Most of our queries support a single field name, the field that gets queried. That is the key of the object which contains the query options. For such queries, in case multiple fields are presented, in most of the cases the query goes through and either the last or the first field only will be read and queried.\n\nThis PR changes the behaviour of all those parsers to throw exception in case multiple field names are provided.\n\nCloses #19547\n", "number": 19791, "review_comments": [ { "body": "I removed these checks at the end of parsing as they are not needed. The query builder throws error anyways when required fields are missing as we validate its input and that is also what we test already (testIllegalArguments test method).\n", "created_at": "2016-08-03T20:27:58Z" }, { "body": "this type of change is mechanical and the same for some of the parsers that didn't loop till the END_OBJECT. That would cause the query to not be completely read in case multiple fields are presented. That would be caught by the new check I am introducing in QueryParseContext#parseInnerQueryBuilder, but I think it is better to change the way we parse those queries to exhaust their own object at all times.\n", "created_at": "2016-08-03T20:29:41Z" }, { "body": "this new check is just best effort, but should help finding out when parsers don't properly exhaust their own object. I don't think the previous leniency was needed.\n", "created_at": "2016-08-03T20:31:08Z" }, { "body": "should we check the return value?\n", "created_at": "2016-08-05T06:32:12Z" }, { "body": "I tend to like expectThrows better for doing this.\n", "created_at": "2016-08-05T06:33:23Z" }, { "body": "yea that is what most people do, but I really like checking the error message too, which I don't think is possible with expectThrows is it?\n", "created_at": "2016-08-05T07:51:20Z" }, { "body": "I am not sure we can make assumptions about what it is. what could it be? another start object, end object...not sure what else...\n", "created_at": "2016-08-05T07:52:30Z" }, { "body": "Exception e = expectThrows(Exception.class, () -> doSomething());\nassertEquals(e.getMessage(), containsString(\"bla\"));\n", "created_at": "2016-08-05T08:00:15Z" }, { "body": "oh... now I feel silly :) thanks @tlrx !!!\n", "created_at": "2016-08-05T08:03:02Z" }, { "body": "Yes, I'm not sure either if we can make any assumption here\n", "created_at": "2016-08-05T08:29:20Z" }, { "body": "Match query is tricky because it can be written as:\n\n```\n{\n \"match\" : {\n \"message1\" : {\n \"query\" : \"this is a test\"\n }\n }\n}\n```\n\nbut also:\n\n```\n{\n \"match\" : {\n \"message1\" : \"this is a test\"\n }\n}\n```\n\nand in case of multiple fields we just catch the first form, not the second which is very used too.\n", "created_at": "2016-08-05T08:36:04Z" }, { "body": "Same comment as for `match`\n", "created_at": "2016-08-05T08:40:56Z" }, { "body": "many queries actually have a short syntax like the match one. I'm afraid what you bring up is a problem that's common to them all. I will dig.\n", "created_at": "2016-08-05T08:49:49Z" }, { "body": "Yes, I know, sorry for that :(\n", "created_at": "2016-08-05T08:54:01Z" }, { "body": "I pushed a commit that introduces using expectThrows wherever possible in our query tests. That way I will remember it for the next time as I had to introduce a kagillion of those... 
also PR stats got much better :)\n", "created_at": "2016-08-05T11:57:30Z" }, { "body": "I suspect that this situation is already at least partially caught by making QueryParseContext#parseInnerQueryBuilder stricter as in checking what is the current token after the query gets parsed. I have ideas on how to test it too but I'd prefer to do this in a followup PR if you don' mind. We already test these short syntaxes but we never inject bogus objects in them like we do with the \"standard\" json output, we can totally introduce that for more coverage.\n", "created_at": "2016-08-05T13:43:58Z" }, { "body": "Sure.\n\n> We already test these short syntaxes but we never inject bogus objects in them like we do with the \"standard\" json output, we can totally introduce that for more coverage.\n\nSince query builders now implements ToXContent I think we could introduce bogus object using a XContentBuilder that randomly duplicates fields. Just an idea.\n", "created_at": "2016-08-05T13:48:13Z" }, { "body": "back on this, I think I am sure now we can make assumption that this must be an end_object. I opened #20528 .\n", "created_at": "2016-09-16T17:01:07Z" }, { "body": "Thanks for the heads up - I agree too.\n", "created_at": "2016-09-19T07:12:02Z" } ], "title": "Query parsers to throw exception when multiple field names are provided" }
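The change applied across the parsers in the diffs below follows one mechanical pattern: remember the first top-level field name seen and reject any second one. A reduced sketch of that guard in isolation (field-name tracking only, not a full query parser; a real parser reads the per-field options instead of skipping them):

```java
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;

// Reduced version of the guard added to each single-field query parser:
// the first object field becomes the queried field; any second one fails.
final class SingleFieldGuard {
    static String parseSingleFieldName(XContentParser parser, String queryName) throws IOException {
        String fieldName = null;
        String currentFieldName = null;
        XContentParser.Token token;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if (token == XContentParser.Token.START_OBJECT) {
                if (fieldName != null) {
                    throw new ParsingException(parser.getTokenLocation(), "[" + queryName
                            + "] query doesn't support multiple fields, found [" + fieldName
                            + "] and [" + currentFieldName + "]");
                }
                fieldName = currentFieldName;
                parser.skipChildren(); // placeholder for reading the field's options
            }
        }
        return fieldName;
    }
}
```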
{ "commits": [ { "message": "Throw parsing error if range query contains multiple fields\n\nRange Query, like many other queries, used to parse when the query refers to multiple fields and the last one would win. We rather throw an exception now instead.\n\nCloses #19547" }, { "message": "Throw parsing error if prefix query contains multiple fields\n\nPrefix Query, like many other queries, used to parse when the query refers to multiple fields and the last one would win. We rather throw an exception now instead.\nAlso added tests for short prefix quer variant." }, { "message": "Throw parsing error if regexp query contains multiple fields\n\nRegexp Query, like many other queries, used to parse even when the query referred to multiple fields and the last one would win. We rather throw an exception now instead.\nAlso added test for short prefix query variant." }, { "message": "Throw parsing error if wildcard query contains multiple fields\n\nWildcard Query, like many other queries, used to parse even when the query referred to multiple fields and the first one would win. We rather throw an exception now instead.\nAlso added test for short prefix query variant and modified the parsing code to consume the whole query object." }, { "message": "Throw parsing error if match_phrase query contains multiple fields\n\nMatch phrase Query, like many other queries, used to parse even when the query referred to multiple fields and the first one would win. We rather throw an exception now instead.\nAlso added test for short prefix query variant and modified the parsing code to consume the whole query object." }, { "message": "[TEST] check validation error messages in IdsQueryBuilderTests" }, { "message": "Throw parsing error if geo_distance query contains multiple fields\n\nGeo distance Query, like many other queries, used to parse even when the query referred to multiple fields and the last one would win. We rather throw an exception now instead." }, { "message": "Throw parsing error if match_phrase_prefix query contains multiple fields\n\nMatch phrase prefix Query, like many other queries, used to parse even when the query referred to multiple fields and the first one would win. We rather throw an exception now instead.\nAlso added test for short prefix query variant and modified the parsing code to consume the whole query object." }, { "message": "Throw parsing error if match query contains multiple fields\n\nMatch Query, like many other queries, used to parse even when the query referred to multiple fields and the first one would win. We rather throw an exception now instead.\nAlso added test for short prefix query variant and modified the parsing code to consume the whole query object." }, { "message": "Throw parsing error if common terms query contains multiple fields\n\nCommon Terms Query, like many other queries, used to parse even when the query referred to multiple fields and the first one would win. We rather throw an exception now instead.\nAlso added test for short prefix query variant and modified the parsing code to consume the whole query object." }, { "message": "Throw parsing error if span_term query contains multiple fields\n\nSpan term Query, like many other queries, used to parse even when the query referred to multiple fields and the first one would win. We rather throw an exception now instead.\nAlso modified the parsing code to consume the whole query object." 
}, { "message": "[TEST] check validation error messages in AbstractTermQueryTestCase" }, { "message": "[TEST] test that term query throws error when made against multiple fields" }, { "message": "Throw parsing error if fuzzy query contains multiple fields\n\nFuzzy Query, like many other queries, used to parse even when the query referred to multiple fields and the first one would win. We rather throw an exception now instead.\nAlso added test for short prefix query variant and modified the parsing code to consume the whole query object." }, { "message": "Make query parsing stricter by requiring each parser to stop at END_OBJECT token\n\nInstead of being lenient in QueryParseContext#parseInnerQueryBuilder we check that the token where the parser stopped reading was END_OBJECT, and throw error otherwise. This is a best effort to verify that the parsers read a whole object rather than stepping out in the middle of it due to malformed queries." }, { "message": "fix line length in FuzzyQueryBuilder" }, { "message": "[TEST] use expectThrows wherever possible in query builder unit tests" } ], "files": [ { "diff": "@@ -102,7 +102,7 @@ public CommonTermsQueryBuilder(String fieldName, Object text) {\n throw new IllegalArgumentException(\"field name is null or empty\");\n }\n if (text == null) {\n- throw new IllegalArgumentException(\"text cannot be null.\");\n+ throw new IllegalArgumentException(\"text cannot be null\");\n }\n this.fieldName = fieldName;\n this.text = text;\n@@ -265,11 +265,8 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep\n \n public static Optional<CommonTermsQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {\n XContentParser parser = parseContext.parser();\n- XContentParser.Token token = parser.nextToken();\n- if (token != XContentParser.Token.FIELD_NAME) {\n- throw new ParsingException(parser.getTokenLocation(), \"[\" + NAME + \"] query malformed, no field\");\n- }\n- String fieldName = parser.currentName();\n+\n+ String fieldName = null;\n Object text = null;\n float boost = AbstractQueryBuilder.DEFAULT_BOOST;\n String analyzer = null;\n@@ -280,78 +277,79 @@ public static Optional<CommonTermsQueryBuilder> fromXContent(QueryParseContext p\n Operator lowFreqOperator = CommonTermsQueryBuilder.DEFAULT_LOW_FREQ_OCCUR;\n float cutoffFrequency = CommonTermsQueryBuilder.DEFAULT_CUTOFF_FREQ;\n String queryName = null;\n- token = parser.nextToken();\n- if (token == XContentParser.Token.START_OBJECT) {\n- String currentFieldName = null;\n- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n- if (token == XContentParser.Token.FIELD_NAME) {\n- currentFieldName = parser.currentName();\n- } else if (token == XContentParser.Token.START_OBJECT) {\n- if (parseContext.getParseFieldMatcher().match(currentFieldName, MINIMUM_SHOULD_MATCH_FIELD)) {\n- String innerFieldName = null;\n- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n- if (token == XContentParser.Token.FIELD_NAME) {\n- innerFieldName = parser.currentName();\n- } else if (token.isValue()) {\n- if (parseContext.getParseFieldMatcher().match(innerFieldName, LOW_FREQ_FIELD)) {\n- lowFreqMinimumShouldMatch = parser.text();\n- } else if (parseContext.getParseFieldMatcher().match(innerFieldName, HIGH_FREQ_FIELD)) {\n- highFreqMinimumShouldMatch = parser.text();\n+ XContentParser.Token token;\n+ String currentFieldName = null;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == 
XContentParser.Token.FIELD_NAME) {\n+ currentFieldName = parser.currentName();\n+ } else if (parseContext.isDeprecatedSetting(currentFieldName)) {\n+ // skip\n+ } else if (token == XContentParser.Token.START_OBJECT) {\n+ if (fieldName != null) {\n+ throw new ParsingException(parser.getTokenLocation(), \"[common] query doesn't support multiple fields, found [\"\n+ + fieldName + \"] and [\" + currentFieldName + \"]\");\n+ }\n+ fieldName = currentFieldName;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ currentFieldName = parser.currentName();\n+ } else if (token == XContentParser.Token.START_OBJECT) {\n+ if (parseContext.getParseFieldMatcher().match(currentFieldName, MINIMUM_SHOULD_MATCH_FIELD)) {\n+ String innerFieldName = null;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ innerFieldName = parser.currentName();\n+ } else if (token.isValue()) {\n+ if (parseContext.getParseFieldMatcher().match(innerFieldName, LOW_FREQ_FIELD)) {\n+ lowFreqMinimumShouldMatch = parser.text();\n+ } else if (parseContext.getParseFieldMatcher().match(innerFieldName, HIGH_FREQ_FIELD)) {\n+ highFreqMinimumShouldMatch = parser.text();\n+ } else {\n+ throw new ParsingException(parser.getTokenLocation(), \"[\" + CommonTermsQueryBuilder.NAME +\n+ \"] query does not support [\" + innerFieldName\n+ + \"] for [\" + currentFieldName + \"]\");\n+ }\n } else {\n throw new ParsingException(parser.getTokenLocation(), \"[\" + CommonTermsQueryBuilder.NAME +\n- \"] query does not support [\" + innerFieldName\n- + \"] for [\" + currentFieldName + \"]\");\n+ \"] unexpected token type [\" + token\n+ + \"] after [\" + innerFieldName + \"]\");\n }\n- } else {\n- throw new ParsingException(parser.getTokenLocation(), \"[\" + CommonTermsQueryBuilder.NAME +\n- \"] unexpected token type [\" + token\n- + \"] after [\" + innerFieldName + \"]\");\n }\n+ } else {\n+ throw new ParsingException(parser.getTokenLocation(), \"[\" + CommonTermsQueryBuilder.NAME +\n+ \"] query does not support [\" + currentFieldName + \"]\");\n+ }\n+ } else if (token.isValue()) {\n+ if (parseContext.getParseFieldMatcher().match(currentFieldName, QUERY_FIELD)) {\n+ text = parser.objectText();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, ANALYZER_FIELD)) {\n+ analyzer = parser.text();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, DISABLE_COORD_FIELD)) {\n+ disableCoord = parser.booleanValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {\n+ boost = parser.floatValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, HIGH_FREQ_OPERATOR_FIELD)) {\n+ highFreqOperator = Operator.fromString(parser.text());\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, LOW_FREQ_OPERATOR_FIELD)) {\n+ lowFreqOperator = Operator.fromString(parser.text());\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MINIMUM_SHOULD_MATCH_FIELD)) {\n+ lowFreqMinimumShouldMatch = parser.text();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, CUTOFF_FREQUENCY_FIELD)) {\n+ cutoffFrequency = parser.floatValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {\n+ queryName = parser.text();\n+ } else {\n+ throw new ParsingException(parser.getTokenLocation(), \"[\" + 
CommonTermsQueryBuilder.NAME +\n+ \"] query does not support [\" + currentFieldName + \"]\");\n }\n- } else {\n- throw new ParsingException(parser.getTokenLocation(), \"[\" + CommonTermsQueryBuilder.NAME +\n- \"] query does not support [\" + currentFieldName + \"]\");\n- }\n- } else if (token.isValue()) {\n- if (parseContext.getParseFieldMatcher().match(currentFieldName, QUERY_FIELD)) {\n- text = parser.objectText();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, ANALYZER_FIELD)) {\n- analyzer = parser.text();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, DISABLE_COORD_FIELD)) {\n- disableCoord = parser.booleanValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {\n- boost = parser.floatValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, HIGH_FREQ_OPERATOR_FIELD)) {\n- highFreqOperator = Operator.fromString(parser.text());\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, LOW_FREQ_OPERATOR_FIELD)) {\n- lowFreqOperator = Operator.fromString(parser.text());\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MINIMUM_SHOULD_MATCH_FIELD)) {\n- lowFreqMinimumShouldMatch = parser.text();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, CUTOFF_FREQUENCY_FIELD)) {\n- cutoffFrequency = parser.floatValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {\n- queryName = parser.text();\n- } else {\n- throw new ParsingException(parser.getTokenLocation(), \"[\" + CommonTermsQueryBuilder.NAME +\n- \"] query does not support [\" + currentFieldName + \"]\");\n }\n }\n- }\n- parser.nextToken();\n- } else {\n- text = parser.objectText();\n- // move to the next token\n- token = parser.nextToken();\n- if (token != XContentParser.Token.END_OBJECT) {\n- throw new ParsingException(parser.getTokenLocation(),\n- \"[common] query parsed in simplified form, with direct field name, but included more options than just \" +\n- \"the field name, possibly use its 'options' form, with 'query' element?\");\n+ } else {\n+ fieldName = parser.currentName();\n+ text = parser.objectText();\n }\n }\n \n- if (text == null) {\n- throw new ParsingException(parser.getTokenLocation(), \"No text specified for text query\");\n- }\n return Optional.of(new CommonTermsQueryBuilder(fieldName, text)\n .lowFreqMinimumShouldMatch(lowFreqMinimumShouldMatch)\n .highFreqMinimumShouldMatch(highFreqMinimumShouldMatch)", "filename": "core/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java", "status": "modified" }, { "diff": "@@ -152,7 +152,7 @@ public FuzzyQueryBuilder(String fieldName, boolean value) {\n */\n public FuzzyQueryBuilder(String fieldName, Object value) {\n if (Strings.isEmpty(fieldName)) {\n- throw new IllegalArgumentException(\"field name cannot be null or empty.\");\n+ throw new IllegalArgumentException(\"field name cannot be null or empty\");\n }\n if (value == null) {\n throw new IllegalArgumentException(\"query value cannot be null\");\n@@ -258,63 +258,60 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep\n \n public static Optional<FuzzyQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {\n XContentParser parser = parseContext.parser();\n-\n- XContentParser.Token token = parser.nextToken();\n- if (token != XContentParser.Token.FIELD_NAME) {\n- throw new 
ParsingException(parser.getTokenLocation(), \"[fuzzy] query malformed, no field\");\n- }\n-\n- String fieldName = parser.currentName();\n+ String fieldName = null;\n Object value = null;\n-\n Fuzziness fuzziness = FuzzyQueryBuilder.DEFAULT_FUZZINESS;\n int prefixLength = FuzzyQueryBuilder.DEFAULT_PREFIX_LENGTH;\n int maxExpansions = FuzzyQueryBuilder.DEFAULT_MAX_EXPANSIONS;\n boolean transpositions = FuzzyQueryBuilder.DEFAULT_TRANSPOSITIONS;\n String rewrite = null;\n-\n String queryName = null;\n float boost = AbstractQueryBuilder.DEFAULT_BOOST;\n-\n- token = parser.nextToken();\n- if (token == XContentParser.Token.START_OBJECT) {\n- String currentFieldName = null;\n- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n- if (token == XContentParser.Token.FIELD_NAME) {\n- currentFieldName = parser.currentName();\n- } else {\n- if (parseContext.getParseFieldMatcher().match(currentFieldName, TERM_FIELD)) {\n- value = parser.objectBytes();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, VALUE_FIELD)) {\n- value = parser.objectBytes();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {\n- boost = parser.floatValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, Fuzziness.FIELD)) {\n- fuzziness = Fuzziness.parse(parser);\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, PREFIX_LENGTH_FIELD)) {\n- prefixLength = parser.intValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_EXPANSIONS_FIELD)) {\n- maxExpansions = parser.intValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, TRANSPOSITIONS_FIELD)) {\n- transpositions = parser.booleanValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, REWRITE_FIELD)) {\n- rewrite = parser.textOrNull();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {\n- queryName = parser.text();\n+ String currentFieldName = null;\n+ XContentParser.Token token;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ currentFieldName = parser.currentName();\n+ } else if (parseContext.isDeprecatedSetting(currentFieldName)) {\n+ // skip\n+ } else if (token == XContentParser.Token.START_OBJECT) {\n+ if (fieldName != null) {\n+ throw new ParsingException(parser.getTokenLocation(), \"[fuzzy] query doesn't support multiple fields, found [\"\n+ + fieldName + \"] and [\" + currentFieldName + \"]\");\n+ }\n+ fieldName = currentFieldName;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ currentFieldName = parser.currentName();\n } else {\n- throw new ParsingException(parser.getTokenLocation(), \"[fuzzy] query does not support [\" + currentFieldName + \"]\");\n+ if (parseContext.getParseFieldMatcher().match(currentFieldName, TERM_FIELD)) {\n+ value = parser.objectBytes();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, VALUE_FIELD)) {\n+ value = parser.objectBytes();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {\n+ boost = parser.floatValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, Fuzziness.FIELD)) {\n+ fuzziness = Fuzziness.parse(parser);\n+ } else if 
(parseContext.getParseFieldMatcher().match(currentFieldName, PREFIX_LENGTH_FIELD)) {\n+ prefixLength = parser.intValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_EXPANSIONS_FIELD)) {\n+ maxExpansions = parser.intValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, TRANSPOSITIONS_FIELD)) {\n+ transpositions = parser.booleanValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, REWRITE_FIELD)) {\n+ rewrite = parser.textOrNull();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {\n+ queryName = parser.text();\n+ } else {\n+ throw new ParsingException(parser.getTokenLocation(),\n+ \"[fuzzy] query does not support [\" + currentFieldName + \"]\");\n+ }\n }\n }\n+ } else {\n+ fieldName = parser.currentName();\n+ value = parser.objectBytes();\n }\n- parser.nextToken();\n- } else {\n- value = parser.objectBytes();\n- // move to the next token\n- parser.nextToken();\n- }\n-\n- if (value == null) {\n- throw new ParsingException(parser.getTokenLocation(), \"no value specified for fuzzy query\");\n }\n return Optional.of(new FuzzyQueryBuilder(fieldName, value)\n .fuzziness(fuzziness)", "filename": "core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java", "status": "modified" }, { "diff": "@@ -359,9 +359,12 @@ public static Optional<GeoDistanceQueryBuilder> fromXContent(QueryParseContext p\n fieldName = currentFieldName;\n GeoUtils.parseGeoPoint(parser, point);\n } else if (token == XContentParser.Token.START_OBJECT) {\n+ if (fieldName != null) {\n+ throw new ParsingException(parser.getTokenLocation(), \"[geo_distance] query doesn't support multiple fields, found [\"\n+ + fieldName + \"] and [\" + currentFieldName + \"]\");\n+ }\n // the json in the format of -> field : { lat : 30, lon : 12 }\n String currentName = parser.currentName();\n- assert currentFieldName != null;\n fieldName = currentFieldName;\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {", "filename": "core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java", "status": "modified" }, { "diff": "@@ -192,62 +192,55 @@ protected int doHashCode() {\n \n public static Optional<MatchPhrasePrefixQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {\n XContentParser parser = parseContext.parser();\n-\n- XContentParser.Token token = parser.nextToken();\n- if (token != XContentParser.Token.FIELD_NAME) {\n- throw new ParsingException(parser.getTokenLocation(), \"[\" + NAME + \"] query malformed, no field\");\n- }\n- String fieldName = parser.currentName();\n-\n+ String fieldName = null;\n Object value = null;\n float boost = AbstractQueryBuilder.DEFAULT_BOOST;\n String analyzer = null;\n int slop = MatchQuery.DEFAULT_PHRASE_SLOP;\n int maxExpansion = FuzzyQuery.defaultMaxExpansions;\n String queryName = null;\n-\n- token = parser.nextToken();\n- if (token == XContentParser.Token.START_OBJECT) {\n- String currentFieldName = null;\n- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n- if (token == XContentParser.Token.FIELD_NAME) {\n- currentFieldName = parser.currentName();\n- } else if (token.isValue()) {\n- if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.QUERY_FIELD)) {\n- value = parser.objectText();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, 
MatchQueryBuilder.ANALYZER_FIELD)) {\n- analyzer = parser.text();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {\n- boost = parser.floatValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchPhraseQueryBuilder.SLOP_FIELD)) {\n- slop = parser.intValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_EXPANSIONS_FIELD)) {\n- maxExpansion = parser.intValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {\n- queryName = parser.text();\n+ XContentParser.Token token;\n+ String currentFieldName = null;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ currentFieldName = parser.currentName();\n+ } else if (parseContext.isDeprecatedSetting(currentFieldName)) {\n+ // skip\n+ } else if (token == XContentParser.Token.START_OBJECT) {\n+ if (fieldName != null) {\n+ throw new ParsingException(parser.getTokenLocation(), \"[match_phrase_prefix] query doesn't support multiple \" +\n+ \"fields, found [\" + fieldName + \"] and [\" + currentFieldName + \"]\");\n+ }\n+ fieldName = currentFieldName;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ currentFieldName = parser.currentName();\n+ } else if (token.isValue()) {\n+ if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.QUERY_FIELD)) {\n+ value = parser.objectText();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.ANALYZER_FIELD)) {\n+ analyzer = parser.text();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {\n+ boost = parser.floatValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchPhraseQueryBuilder.SLOP_FIELD)) {\n+ slop = parser.intValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_EXPANSIONS_FIELD)) {\n+ maxExpansion = parser.intValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {\n+ queryName = parser.text();\n+ } else {\n+ throw new ParsingException(parser.getTokenLocation(),\n+ \"[\" + NAME + \"] query does not support [\" + currentFieldName + \"]\");\n+ }\n } else {\n throw new ParsingException(parser.getTokenLocation(),\n- \"[\" + NAME + \"] query does not support [\" + currentFieldName + \"]\");\n+ \"[\" + NAME + \"] unknown token [\" + token + \"] after [\" + currentFieldName + \"]\");\n }\n- } else {\n- throw new ParsingException(parser.getTokenLocation(),\n- \"[\" + NAME + \"] unknown token [\" + token + \"] after [\" + currentFieldName + \"]\");\n }\n+ } else {\n+ fieldName = parser.currentName();\n+ value = parser.objectText();\n }\n- parser.nextToken();\n- } else {\n- value = parser.objectText();\n- // move to the next token\n- token = parser.nextToken();\n- if (token != XContentParser.Token.END_OBJECT) {\n- throw new ParsingException(parser.getTokenLocation(), \"[\" + NAME\n- + \"] query parsed in simplified form, with direct field name, \"\n- + \"but included more options than just the field name, possibly use its 'options' form, with 'query' element?\");\n- }\n- }\n-\n- if (value == null) {\n- throw new ParsingException(parser.getTokenLocation(), \"No text specified for text query\");\n }\n \n 
MatchPhrasePrefixQueryBuilder matchQuery = new MatchPhrasePrefixQueryBuilder(fieldName, value);", "filename": "core/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java", "status": "modified" }, { "diff": "@@ -22,6 +22,7 @@\n import org.apache.lucene.search.Query;\n import org.elasticsearch.common.ParseField;\n import org.elasticsearch.common.ParsingException;\n+import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n@@ -49,7 +50,7 @@ public class MatchPhraseQueryBuilder extends AbstractQueryBuilder<MatchPhraseQue\n private int slop = MatchQuery.DEFAULT_PHRASE_SLOP;\n \n public MatchPhraseQueryBuilder(String fieldName, Object value) {\n- if (fieldName == null) {\n+ if (Strings.isEmpty(fieldName)) {\n throw new IllegalArgumentException(\"[\" + NAME + \"] requires fieldName\");\n }\n if (value == null) {\n@@ -163,59 +164,52 @@ protected int doHashCode() {\n \n public static Optional<MatchPhraseQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {\n XContentParser parser = parseContext.parser();\n-\n- XContentParser.Token token = parser.nextToken();\n- if (token != XContentParser.Token.FIELD_NAME) {\n- throw new ParsingException(parser.getTokenLocation(), \"[\" + NAME + \"] query malformed, no field\");\n- }\n- String fieldName = parser.currentName();\n-\n+ String fieldName = null;\n Object value = null;\n float boost = AbstractQueryBuilder.DEFAULT_BOOST;\n String analyzer = null;\n int slop = MatchQuery.DEFAULT_PHRASE_SLOP;\n String queryName = null;\n-\n- token = parser.nextToken();\n- if (token == XContentParser.Token.START_OBJECT) {\n- String currentFieldName = null;\n- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n- if (token == XContentParser.Token.FIELD_NAME) {\n- currentFieldName = parser.currentName();\n- } else if (token.isValue()) {\n- if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.QUERY_FIELD)) {\n- value = parser.objectText();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.ANALYZER_FIELD)) {\n- analyzer = parser.text();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {\n- boost = parser.floatValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, SLOP_FIELD)) {\n- slop = parser.intValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {\n- queryName = parser.text();\n+ String currentFieldName = null;\n+ XContentParser.Token token;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ currentFieldName = parser.currentName();\n+ } else if (parseContext.isDeprecatedSetting(currentFieldName)) {\n+ // skip\n+ } else if (token == XContentParser.Token.START_OBJECT) {\n+ if (fieldName != null) {\n+ throw new ParsingException(parser.getTokenLocation(), \"[match_phrase] query doesn't support multiple fields, found [\"\n+ + fieldName + \"] and [\" + currentFieldName + \"]\");\n+ }\n+ fieldName = currentFieldName;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ currentFieldName = parser.currentName();\n+ } else if (token.isValue()) {\n+ if 
(parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.QUERY_FIELD)) {\n+ value = parser.objectText();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.ANALYZER_FIELD)) {\n+ analyzer = parser.text();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {\n+ boost = parser.floatValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, SLOP_FIELD)) {\n+ slop = parser.intValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {\n+ queryName = parser.text();\n+ } else {\n+ throw new ParsingException(parser.getTokenLocation(),\n+ \"[\" + NAME + \"] query does not support [\" + currentFieldName + \"]\");\n+ }\n } else {\n throw new ParsingException(parser.getTokenLocation(),\n- \"[\" + NAME + \"] query does not support [\" + currentFieldName + \"]\");\n+ \"[\" + NAME + \"] unknown token [\" + token + \"] after [\" + currentFieldName + \"]\");\n }\n- } else {\n- throw new ParsingException(parser.getTokenLocation(),\n- \"[\" + NAME + \"] unknown token [\" + token + \"] after [\" + currentFieldName + \"]\");\n }\n+ } else {\n+ fieldName = parser.currentName();\n+ value = parser.objectText();\n }\n- parser.nextToken();\n- } else {\n- value = parser.objectText();\n- // move to the next token\n- token = parser.nextToken();\n- if (token != XContentParser.Token.END_OBJECT) {\n- throw new ParsingException(parser.getTokenLocation(), \"[\" + NAME\n- + \"] query parsed in simplified form, with direct field name, \"\n- + \"but included more options than just the field name, possibly use its 'options' form, with 'query' element?\");\n- }\n- }\n-\n- if (value == null) {\n- throw new ParsingException(parser.getTokenLocation(), \"No text specified for text query\");\n }\n \n MatchPhraseQueryBuilder matchQuery = new MatchPhraseQueryBuilder(fieldName, value);", "filename": "core/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java", "status": "modified" }, { "diff": "@@ -510,13 +510,7 @@ public String getWriteableName() {\n \n public static Optional<MatchQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {\n XContentParser parser = parseContext.parser();\n-\n- XContentParser.Token token = parser.nextToken();\n- if (token != XContentParser.Token.FIELD_NAME) {\n- throw new ParsingException(parser.getTokenLocation(), \"[\" + MatchQueryBuilder.NAME + \"] query malformed, no field\");\n- }\n- String fieldName = parser.currentName();\n-\n+ String fieldName = null;\n MatchQuery.Type type = MatchQuery.Type.BOOLEAN;\n Object value = null;\n float boost = AbstractQueryBuilder.DEFAULT_BOOST;\n@@ -533,80 +527,84 @@ public static Optional<MatchQueryBuilder> fromXContent(QueryParseContext parseCo\n Float cutOffFrequency = null;\n ZeroTermsQuery zeroTermsQuery = MatchQuery.DEFAULT_ZERO_TERMS_QUERY;\n String queryName = null;\n-\n- token = parser.nextToken();\n- if (token == XContentParser.Token.START_OBJECT) {\n- String currentFieldName = null;\n- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n- if (token == XContentParser.Token.FIELD_NAME) {\n- currentFieldName = parser.currentName();\n- } else if (token.isValue()) {\n- if (parseContext.getParseFieldMatcher().match(currentFieldName, QUERY_FIELD)) {\n- value = parser.objectText();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, TYPE_FIELD)) {\n- String tStr = 
parser.text();\n- if (\"boolean\".equals(tStr)) {\n- type = MatchQuery.Type.BOOLEAN;\n- } else if (\"phrase\".equals(tStr)) {\n- type = MatchQuery.Type.PHRASE;\n- } else if (\"phrase_prefix\".equals(tStr) || (\"phrasePrefix\".equals(tStr))) {\n- type = MatchQuery.Type.PHRASE_PREFIX;\n- } else {\n- throw new ParsingException(parser.getTokenLocation(), \"[\" + NAME + \"] query does not support type \" + tStr);\n- }\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, ANALYZER_FIELD)) {\n- analyzer = parser.text();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {\n- boost = parser.floatValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, SLOP_FIELD)) {\n- slop = parser.intValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, Fuzziness.FIELD)) {\n- fuzziness = Fuzziness.parse(parser);\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, PREFIX_LENGTH_FIELD)) {\n- prefixLength = parser.intValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_EXPANSIONS_FIELD)) {\n- maxExpansion = parser.intValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, OPERATOR_FIELD)) {\n- operator = Operator.fromString(parser.text());\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MINIMUM_SHOULD_MATCH_FIELD)) {\n- minimumShouldMatch = parser.textOrNull();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, FUZZY_REWRITE_FIELD)) {\n- fuzzyRewrite = parser.textOrNull();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, FUZZY_TRANSPOSITIONS_FIELD)) {\n- fuzzyTranspositions = parser.booleanValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, LENIENT_FIELD)) {\n- lenient = parser.booleanValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, CUTOFF_FREQUENCY_FIELD)) {\n- cutOffFrequency = parser.floatValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, ZERO_TERMS_QUERY_FIELD)) {\n- String zeroTermsDocs = parser.text();\n- if (\"none\".equalsIgnoreCase(zeroTermsDocs)) {\n- zeroTermsQuery = MatchQuery.ZeroTermsQuery.NONE;\n- } else if (\"all\".equalsIgnoreCase(zeroTermsDocs)) {\n- zeroTermsQuery = MatchQuery.ZeroTermsQuery.ALL;\n+ String currentFieldName = null;\n+ XContentParser.Token token;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ currentFieldName = parser.currentName();\n+ } else if (parseContext.isDeprecatedSetting(currentFieldName)) {\n+ // skip\n+ } else if (token == XContentParser.Token.START_OBJECT) {\n+ if (fieldName != null) {\n+ throw new ParsingException(parser.getTokenLocation(), \"[match] query doesn't support multiple fields, found [\"\n+ + fieldName + \"] and [\" + currentFieldName + \"]\");\n+ }\n+ fieldName = currentFieldName;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ currentFieldName = parser.currentName();\n+ } else if (token.isValue()) {\n+ if (parseContext.getParseFieldMatcher().match(currentFieldName, QUERY_FIELD)) {\n+ value = parser.objectText();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, TYPE_FIELD)) {\n+ String tStr = parser.text();\n+ if (\"boolean\".equals(tStr)) {\n+ type = MatchQuery.Type.BOOLEAN;\n+ } else if 
(\"phrase\".equals(tStr)) {\n+ type = MatchQuery.Type.PHRASE;\n+ } else if (\"phrase_prefix\".equals(tStr) || (\"phrasePrefix\".equals(tStr))) {\n+ type = MatchQuery.Type.PHRASE_PREFIX;\n+ } else {\n+ throw new ParsingException(parser.getTokenLocation(), \"[\" + NAME + \"] query does not support type \" + tStr);\n+ }\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, ANALYZER_FIELD)) {\n+ analyzer = parser.text();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {\n+ boost = parser.floatValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, SLOP_FIELD)) {\n+ slop = parser.intValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, Fuzziness.FIELD)) {\n+ fuzziness = Fuzziness.parse(parser);\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, PREFIX_LENGTH_FIELD)) {\n+ prefixLength = parser.intValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_EXPANSIONS_FIELD)) {\n+ maxExpansion = parser.intValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, OPERATOR_FIELD)) {\n+ operator = Operator.fromString(parser.text());\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MINIMUM_SHOULD_MATCH_FIELD)) {\n+ minimumShouldMatch = parser.textOrNull();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, FUZZY_REWRITE_FIELD)) {\n+ fuzzyRewrite = parser.textOrNull();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, FUZZY_TRANSPOSITIONS_FIELD)) {\n+ fuzzyTranspositions = parser.booleanValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, LENIENT_FIELD)) {\n+ lenient = parser.booleanValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, CUTOFF_FREQUENCY_FIELD)) {\n+ cutOffFrequency = parser.floatValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, ZERO_TERMS_QUERY_FIELD)) {\n+ String zeroTermsDocs = parser.text();\n+ if (\"none\".equalsIgnoreCase(zeroTermsDocs)) {\n+ zeroTermsQuery = MatchQuery.ZeroTermsQuery.NONE;\n+ } else if (\"all\".equalsIgnoreCase(zeroTermsDocs)) {\n+ zeroTermsQuery = MatchQuery.ZeroTermsQuery.ALL;\n+ } else {\n+ throw new ParsingException(parser.getTokenLocation(),\n+ \"Unsupported zero_terms_docs value [\" + zeroTermsDocs + \"]\");\n+ }\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {\n+ queryName = parser.text();\n } else {\n throw new ParsingException(parser.getTokenLocation(),\n- \"Unsupported zero_terms_docs value [\" + zeroTermsDocs + \"]\");\n+ \"[\" + NAME + \"] query does not support [\" + currentFieldName + \"]\");\n }\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {\n- queryName = parser.text();\n } else {\n throw new ParsingException(parser.getTokenLocation(),\n- \"[\" + NAME + \"] query does not support [\" + currentFieldName + \"]\");\n+ \"[\" + NAME + \"] unknown token [\" + token + \"] after [\" + currentFieldName + \"]\");\n }\n- } else {\n- throw new ParsingException(parser.getTokenLocation(),\n- \"[\" + NAME + \"] unknown token [\" + token + \"] after [\" + currentFieldName + \"]\");\n }\n- }\n- parser.nextToken();\n- } else {\n- value = parser.objectText();\n- // move to the next token\n- token = parser.nextToken();\n- if (token != XContentParser.Token.END_OBJECT) {\n- throw 
new ParsingException(parser.getTokenLocation(), \"[match] query parsed in simplified form, with direct field name, \"\n- + \"but included more options than just the field name, possibly use its 'options' form, with 'query' element?\");\n+ } else {\n+ fieldName = parser.currentName();\n+ value = parser.objectText();\n }\n }\n ", "filename": "core/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java", "status": "modified" }, { "diff": "@@ -64,7 +64,7 @@ public PrefixQueryBuilder(String fieldName, String value) {\n throw new IllegalArgumentException(\"field name is null or empty\");\n }\n if (value == null) {\n- throw new IllegalArgumentException(\"value cannot be null.\");\n+ throw new IllegalArgumentException(\"value cannot be null\");\n }\n this.fieldName = fieldName;\n this.value = value;\n@@ -120,7 +120,7 @@ public void doXContent(XContentBuilder builder, Params params) throws IOExceptio\n public static Optional<PrefixQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {\n XContentParser parser = parseContext.parser();\n \n- String fieldName = parser.currentName();\n+ String fieldName = null;\n String value = null;\n String rewrite = null;\n \n@@ -134,6 +134,10 @@ public static Optional<PrefixQueryBuilder> fromXContent(QueryParseContext parseC\n } else if (parseContext.isDeprecatedSetting(currentFieldName)) {\n // skip\n } else if (token == XContentParser.Token.START_OBJECT) {\n+ if (fieldName != null) {\n+ throw new ParsingException(parser.getTokenLocation(), \"[prefix] query doesn't support multiple fields, found [\"\n+ + fieldName + \"] and [\" + currentFieldName + \"]\");\n+ }\n fieldName = currentFieldName;\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {\n@@ -149,19 +153,16 @@ public static Optional<PrefixQueryBuilder> fromXContent(QueryParseContext parseC\n rewrite = parser.textOrNull();\n } else {\n throw new ParsingException(parser.getTokenLocation(),\n- \"[regexp] query does not support [\" + currentFieldName + \"]\");\n+ \"[prefix] query does not support [\" + currentFieldName + \"]\");\n }\n }\n }\n } else {\n- fieldName = currentFieldName;\n- value = parser.textOrNull();\n+ fieldName = currentFieldName;\n+ value = parser.textOrNull();\n }\n }\n \n- if (value == null) {\n- throw new ParsingException(parser.getTokenLocation(), \"No value specified for prefix query\");\n- }\n return Optional.of(new PrefixQueryBuilder(fieldName, value)\n .rewrite(rewrite)\n .boost(boost)", "filename": "core/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java", "status": "modified" }, { "diff": "@@ -115,10 +115,11 @@ public Optional<QueryBuilder> parseInnerQueryBuilder() throws IOException {\n @SuppressWarnings(\"unchecked\")\n Optional<QueryBuilder> result = (Optional<QueryBuilder>) indicesQueriesRegistry.lookup(queryName, parseFieldMatcher,\n parser.getTokenLocation()).fromXContent(this);\n- if (parser.currentToken() == XContentParser.Token.END_OBJECT) {\n- // if we are at END_OBJECT, move to the next one...\n- parser.nextToken();\n+ if (parser.currentToken() != XContentParser.Token.END_OBJECT) {\n+ throw new ParsingException(parser.getTokenLocation(),\n+ \"[\" + queryName + \"] malformed query, expected [END_OBJECT] but found [\" + parser.currentToken() + \"]\");\n }\n+ parser.nextToken();\n return result;\n }\n ", "filename": "core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java", "status": "modified" }, { "diff": "@@ -318,6 +318,10 @@ public 
static Optional<RangeQueryBuilder> fromXContent(QueryParseContext parseCo\n } else if (parseContext.isDeprecatedSetting(currentFieldName)) {\n // skip\n } else if (token == XContentParser.Token.START_OBJECT) {\n+ if (fieldName != null) {\n+ throw new ParsingException(parser.getTokenLocation(), \"[range] query doesn't support multiple fields, found [\"\n+ + fieldName + \"] and [\" + currentFieldName + \"]\");\n+ }\n fieldName = currentFieldName;\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {", "filename": "core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java", "status": "modified" }, { "diff": "@@ -77,7 +77,7 @@ public RegexpQueryBuilder(String fieldName, String value) {\n throw new IllegalArgumentException(\"field name is null or empty\");\n }\n if (value == null) {\n- throw new IllegalArgumentException(\"value cannot be null.\");\n+ throw new IllegalArgumentException(\"value cannot be null\");\n }\n this.fieldName = fieldName;\n this.value = value;\n@@ -180,10 +180,8 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep\n \n public static Optional<RegexpQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {\n XContentParser parser = parseContext.parser();\n-\n- String fieldName = parser.currentName();\n+ String fieldName = null;\n String rewrite = null;\n-\n String value = null;\n float boost = AbstractQueryBuilder.DEFAULT_BOOST;\n int flagsValue = RegexpQueryBuilder.DEFAULT_FLAGS_VALUE;\n@@ -197,6 +195,10 @@ public static Optional<RegexpQueryBuilder> fromXContent(QueryParseContext parseC\n } else if (parseContext.isDeprecatedSetting(currentFieldName)) {\n // skip\n } else if (token == XContentParser.Token.START_OBJECT) {\n+ if (fieldName != null) {\n+ throw new ParsingException(parser.getTokenLocation(), \"[regexp] query doesn't support multiple fields, found [\"\n+ + fieldName + \"] and [\" + currentFieldName + \"]\");\n+ }\n fieldName = currentFieldName;\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {\n@@ -233,9 +235,6 @@ public static Optional<RegexpQueryBuilder> fromXContent(QueryParseContext parseC\n }\n }\n \n- if (value == null) {\n- throw new ParsingException(parser.getTokenLocation(), \"No value specified for regexp query\");\n- }\n return Optional.of(new RegexpQueryBuilder(fieldName, value)\n .flags(flagsValue)\n .maxDeterminizedStates(maxDeterminizedStates)", "filename": "core/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java", "status": "modified" }, { "diff": "@@ -186,7 +186,7 @@ public String value() {\n /** Add a field to run the query against. */\n public SimpleQueryStringBuilder field(String field) {\n if (Strings.isEmpty(field)) {\n- throw new IllegalArgumentException(\"supplied field is null or empty.\");\n+ throw new IllegalArgumentException(\"supplied field is null or empty\");\n }\n this.fieldsAndWeights.put(field, AbstractQueryBuilder.DEFAULT_BOOST);\n return this;\n@@ -195,7 +195,7 @@ public SimpleQueryStringBuilder field(String field) {\n /** Add a field to run the query against with a specific boost. 
*/\n public SimpleQueryStringBuilder field(String field, float boost) {\n if (Strings.isEmpty(field)) {\n- throw new IllegalArgumentException(\"supplied field is null or empty.\");\n+ throw new IllegalArgumentException(\"supplied field is null or empty\");\n }\n this.fieldsAndWeights.put(field, boost);\n return this;", "filename": "core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java", "status": "modified" }, { "diff": "@@ -94,49 +94,43 @@ protected SpanQuery doToQuery(QueryShardContext context) throws IOException {\n \n public static Optional<SpanTermQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException, ParsingException {\n XContentParser parser = parseContext.parser();\n-\n- XContentParser.Token token = parser.currentToken();\n- if (token == XContentParser.Token.START_OBJECT) {\n- token = parser.nextToken();\n- }\n-\n- assert token == XContentParser.Token.FIELD_NAME;\n- String fieldName = parser.currentName();\n-\n-\n+ String fieldName = null;\n Object value = null;\n float boost = AbstractQueryBuilder.DEFAULT_BOOST;\n String queryName = null;\n- token = parser.nextToken();\n- if (token == XContentParser.Token.START_OBJECT) {\n- String currentFieldName = null;\n- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n- if (token == XContentParser.Token.FIELD_NAME) {\n- currentFieldName = parser.currentName();\n- } else {\n- if (parseContext.getParseFieldMatcher().match(currentFieldName, TERM_FIELD)) {\n- value = parser.objectBytes();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, BaseTermQueryBuilder.VALUE_FIELD)) {\n- value = parser.objectBytes();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {\n- boost = parser.floatValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {\n- queryName = parser.text();\n+ String currentFieldName = null;\n+ XContentParser.Token token;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ currentFieldName = parser.currentName();\n+ } else if (token == XContentParser.Token.START_OBJECT) {\n+ if (fieldName != null) {\n+ throw new ParsingException(parser.getTokenLocation(), \"[span_term] query doesn't support multiple fields, found [\"\n+ + fieldName + \"] and [\" + currentFieldName + \"]\");\n+ }\n+ fieldName = currentFieldName;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ currentFieldName = parser.currentName();\n } else {\n- throw new ParsingException(parser.getTokenLocation(),\n- \"[span_term] query does not support [\" + currentFieldName + \"]\");\n+ if (parseContext.getParseFieldMatcher().match(currentFieldName, TERM_FIELD)) {\n+ value = parser.objectBytes();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, BaseTermQueryBuilder.VALUE_FIELD)) {\n+ value = parser.objectBytes();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {\n+ boost = parser.floatValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {\n+ queryName = parser.text();\n+ } else {\n+ throw new ParsingException(parser.getTokenLocation(),\n+ \"[span_term] query does not support [\" + currentFieldName + \"]\");\n+ }\n }\n }\n+ } else {\n+ fieldName = 
parser.currentName();\n+ value = parser.objectBytes();\n }\n- parser.nextToken();\n- } else {\n- value = parser.objectBytes();\n- // move to the next token\n- parser.nextToken();\n- }\n-\n- if (value == null) {\n- throw new ParsingException(parser.getTokenLocation(), \"No value specified for term query\");\n }\n \n SpanTermQueryBuilder result = new SpanTermQueryBuilder(fieldName, value);", "filename": "core/src/main/java/org/elasticsearch/index/query/SpanTermQueryBuilder.java", "status": "modified" }, { "diff": "@@ -75,7 +75,7 @@ public WildcardQueryBuilder(String fieldName, String value) {\n throw new IllegalArgumentException(\"field name is null or empty\");\n }\n if (value == null) {\n- throw new IllegalArgumentException(\"value cannot be null.\");\n+ throw new IllegalArgumentException(\"value cannot be null\");\n }\n this.fieldName = fieldName;\n this.value = value;\n@@ -135,49 +135,50 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep\n \n public static Optional<WildcardQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {\n XContentParser parser = parseContext.parser();\n-\n- XContentParser.Token token = parser.nextToken();\n- if (token != XContentParser.Token.FIELD_NAME) {\n- throw new ParsingException(parser.getTokenLocation(), \"[wildcard] query malformed, no field\");\n- }\n- String fieldName = parser.currentName();\n+ String fieldName = null;\n String rewrite = null;\n-\n String value = null;\n float boost = AbstractQueryBuilder.DEFAULT_BOOST;\n String queryName = null;\n- token = parser.nextToken();\n- if (token == XContentParser.Token.START_OBJECT) {\n- String currentFieldName = null;\n- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n- if (token == XContentParser.Token.FIELD_NAME) {\n- currentFieldName = parser.currentName();\n- } else {\n- if (parseContext.getParseFieldMatcher().match(currentFieldName, WILDCARD_FIELD)) {\n- value = parser.text();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, VALUE_FIELD)) {\n- value = parser.text();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {\n- boost = parser.floatValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, REWRITE_FIELD)) {\n- rewrite = parser.textOrNull();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {\n- queryName = parser.text();\n+ String currentFieldName = null;\n+ XContentParser.Token token;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ currentFieldName = parser.currentName();\n+ } else if (parseContext.isDeprecatedSetting(currentFieldName)) {\n+ // skip\n+ } else if (token == XContentParser.Token.START_OBJECT) {\n+ if (fieldName != null) {\n+ throw new ParsingException(parser.getTokenLocation(), \"[wildcard] query doesn't support multiple fields, found [\"\n+ + fieldName + \"] and [\" + currentFieldName + \"]\");\n+ }\n+ fieldName = currentFieldName;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ currentFieldName = parser.currentName();\n } else {\n- throw new ParsingException(parser.getTokenLocation(),\n- \"[wildcard] query does not support [\" + currentFieldName + \"]\");\n+ if (parseContext.getParseFieldMatcher().match(currentFieldName, WILDCARD_FIELD)) {\n+ value = parser.text();\n+ } 
else if (parseContext.getParseFieldMatcher().match(currentFieldName, VALUE_FIELD)) {\n+ value = parser.text();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {\n+ boost = parser.floatValue();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, REWRITE_FIELD)) {\n+ rewrite = parser.textOrNull();\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {\n+ queryName = parser.text();\n+ } else {\n+ throw new ParsingException(parser.getTokenLocation(),\n+ \"[wildcard] query does not support [\" + currentFieldName + \"]\");\n+ }\n }\n }\n+ } else {\n+ fieldName = parser.currentName();\n+ value = parser.text();\n }\n- parser.nextToken();\n- } else {\n- value = parser.text();\n- parser.nextToken();\n }\n \n- if (value == null) {\n- throw new ParsingException(parser.getTokenLocation(), \"No value specified for wildcard query\");\n- }\n return Optional.of(new WildcardQueryBuilder(fieldName, value)\n .rewrite(rewrite)\n .boost(boost)", "filename": "core/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java", "status": "modified" }, { "diff": "@@ -30,23 +30,11 @@ public abstract class AbstractTermQueryTestCase<QB extends BaseTermQueryBuilder<\n protected abstract QB createQueryBuilder(String fieldName, Object value);\n \n public void testIllegalArguments() throws QueryShardException {\n- try {\n- if (randomBoolean()) {\n- createQueryBuilder(null, randomAsciiOfLengthBetween(1, 30));\n- } else {\n- createQueryBuilder(\"\", randomAsciiOfLengthBetween(1, 30));\n- }\n- fail(\"fieldname cannot be null or empty\");\n- } catch (IllegalArgumentException e) {\n- // expected\n- }\n-\n- try {\n- createQueryBuilder(\"field\", null);\n- fail(\"value cannot be null or empty\");\n- } catch (IllegalArgumentException e) {\n- // expected\n- }\n+ String term = randomAsciiOfLengthBetween(1, 30);\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> createQueryBuilder(null, term));\n+ assertEquals(\"field name is null or empty\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class, () -> createQueryBuilder(\"\", term));\n+ assertEquals(\"field name is null or empty\", e.getMessage());\n }\n \n @Override", "filename": "core/src/test/java/org/elasticsearch/index/query/AbstractTermQueryTestCase.java", "status": "modified" }, { "diff": "@@ -163,30 +163,10 @@ protected Map<String, BoolQueryBuilder> getAlternateVersions() {\n \n public void testIllegalArguments() {\n BoolQueryBuilder booleanQuery = new BoolQueryBuilder();\n-\n- try {\n- booleanQuery.must(null);\n- fail(\"cannot be null\");\n- } catch (IllegalArgumentException e) {\n- }\n-\n- try {\n- booleanQuery.mustNot(null);\n- fail(\"cannot be null\");\n- } catch (IllegalArgumentException e) {\n- }\n-\n- try {\n- booleanQuery.filter(null);\n- fail(\"cannot be null\");\n- } catch (IllegalArgumentException e) {\n- }\n-\n- try {\n- booleanQuery.should(null);\n- fail(\"cannot be null\");\n- } catch (IllegalArgumentException e) {\n- }\n+ expectThrows(IllegalArgumentException.class, () -> booleanQuery.must(null));\n+ expectThrows(IllegalArgumentException.class, () -> booleanQuery.mustNot(null));\n+ expectThrows(IllegalArgumentException.class, () -> booleanQuery.filter(null));\n+ expectThrows(IllegalArgumentException.class, () -> booleanQuery.should(null));\n }\n \n // https://github.com/elastic/elasticsearch/issues/7240", "filename": 
"core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -54,26 +54,10 @@ protected void doAssertLuceneQuery(BoostingQueryBuilder queryBuilder, Query quer\n }\n \n public void testIllegalArguments() {\n- try {\n- new BoostingQueryBuilder(null, new MatchAllQueryBuilder());\n- fail(\"must not be null\");\n- } catch (IllegalArgumentException e) {\n- //\n- }\n-\n- try {\n- new BoostingQueryBuilder(new MatchAllQueryBuilder(), null);\n- fail(\"must not be null\");\n- } catch (IllegalArgumentException e) {\n- //\n- }\n-\n- try {\n- new BoostingQueryBuilder(new MatchAllQueryBuilder(), new MatchAllQueryBuilder()).negativeBoost(-1.0f);\n- fail(\"must not be negative\");\n- } catch (IllegalArgumentException e) {\n- //\n- }\n+ expectThrows(IllegalArgumentException.class, () -> new BoostingQueryBuilder(null, new MatchAllQueryBuilder()));\n+ expectThrows(IllegalArgumentException.class, () -> new BoostingQueryBuilder(new MatchAllQueryBuilder(), null));\n+ expectThrows(IllegalArgumentException.class,\n+ () -> new BoostingQueryBuilder(new MatchAllQueryBuilder(), new MatchAllQueryBuilder()).negativeBoost(-1.0f));\n }\n \n public void testFromJson() throws IOException {\n@@ -103,7 +87,6 @@ public void testFromJson() throws IOException {\n \n BoostingQueryBuilder queryBuilder = (BoostingQueryBuilder) parseQuery(query);\n checkGeneratedJson(query, queryBuilder);\n-\n assertEquals(query, 42, queryBuilder.boost(), 0.00001);\n assertEquals(query, 23, queryBuilder.negativeBoost(), 0.00001);\n assertEquals(query, 8, queryBuilder.negativeQuery().boost(), 0.00001);", "filename": "core/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -21,9 +21,12 @@\n \n import org.apache.lucene.queries.ExtendedCommonTermsQuery;\n import org.apache.lucene.search.Query;\n+import org.elasticsearch.common.ParsingException;\n import org.elasticsearch.test.AbstractQueryTestCase;\n \n import java.io.IOException;\n+import java.util.HashMap;\n+import java.util.Map;\n \n import static org.elasticsearch.index.query.QueryBuilders.commonTermsQuery;\n import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;\n@@ -81,6 +84,20 @@ protected CommonTermsQueryBuilder doCreateTestQueryBuilder() {\n return query;\n }\n \n+ @Override\n+ protected Map<String, CommonTermsQueryBuilder> getAlternateVersions() {\n+ Map<String, CommonTermsQueryBuilder> alternateVersions = new HashMap<>();\n+ CommonTermsQueryBuilder commonTermsQuery = new CommonTermsQueryBuilder(randomAsciiOfLengthBetween(1, 10),\n+ randomAsciiOfLengthBetween(1, 10));\n+ String contentString = \"{\\n\" +\n+ \" \\\"common\\\" : {\\n\" +\n+ \" \\\"\" + commonTermsQuery.fieldName() + \"\\\" : \\\"\" + commonTermsQuery.value() + \"\\\"\\n\" +\n+ \" }\\n\" +\n+ \"}\";\n+ alternateVersions.put(contentString, commonTermsQuery);\n+ return alternateVersions;\n+ }\n+\n @Override\n protected void doAssertLuceneQuery(CommonTermsQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException {\n assertThat(query, instanceOf(ExtendedCommonTermsQuery.class));\n@@ -90,23 +107,12 @@ protected void doAssertLuceneQuery(CommonTermsQueryBuilder queryBuilder, Query q\n }\n \n public void testIllegalArguments() {\n- try {\n- if (randomBoolean()) {\n- new CommonTermsQueryBuilder(null, \"text\");\n- } else {\n- new CommonTermsQueryBuilder(\"\", \"text\");\n- }\n- fail(\"must be non null\");\n- } catch (IllegalArgumentException e) {\n- // okay\n- }\n-\n- try 
{\n- new CommonTermsQueryBuilder(\"fieldName\", null);\n- fail(\"must be non null\");\n- } catch (IllegalArgumentException e) {\n- // okay\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new CommonTermsQueryBuilder(null, \"text\"));\n+ assertEquals(\"field name is null or empty\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class, () -> new CommonTermsQueryBuilder(\"\", \"text\"));\n+ assertEquals(\"field name is null or empty\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class, () -> new CommonTermsQueryBuilder(\"fieldName\", null));\n+ assertEquals(\"text cannot be null\", e.getMessage());\n }\n \n public void testFromJson() throws IOException {\n@@ -173,4 +179,20 @@ public void testCommonTermsQuery4() throws IOException {\n ExtendedCommonTermsQuery ectQuery = (ExtendedCommonTermsQuery) parsedQuery;\n assertThat(ectQuery.isCoordDisabled(), equalTo(disableCoord));\n }\n+\n+ public void testParseFailsWithMultipleFields() throws IOException {\n+ String json = \"{\\n\" +\n+ \" \\\"common\\\" : {\\n\" +\n+ \" \\\"message1\\\" : {\\n\" +\n+ \" \\\"query\\\" : \\\"nelly the elephant not as a cartoon\\\"\\n\" +\n+ \" },\\n\" +\n+ \" \\\"message2\\\" : {\\n\" +\n+ \" \\\"query\\\" : \\\"nelly the elephant not as a cartoon\\\"\\n\" +\n+ \" }\\n\" +\n+ \" }\\n\" +\n+ \"}\";\n+\n+ ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json));\n+ assertEquals(\"[common] query doesn't support multiple fields, found [message1] and [message2]\", e.getMessage());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/query/CommonTermsQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -61,12 +61,8 @@ protected void doAssertLuceneQuery(ConstantScoreQueryBuilder queryBuilder, Query\n */\n public void testFilterElement() throws IOException {\n String queryString = \"{ \\\"\" + ConstantScoreQueryBuilder.NAME + \"\\\" : {} }\";\n- try {\n- parseQuery(queryString);\n- fail(\"Expected ParsingException\");\n- } catch (ParsingException e) {\n- assertThat(e.getMessage(), containsString(\"requires a 'filter' element\"));\n- }\n+ ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(queryString));\n+ assertThat(e.getMessage(), containsString(\"requires a 'filter' element\"));\n }\n \n /**\n@@ -77,12 +73,8 @@ public void testMultipleFilterElements() throws IOException {\n \"\\\"filter\\\" : { \\\"term\\\": { \\\"foo\\\": \\\"a\\\" } },\\n\" +\n \"\\\"filter\\\" : { \\\"term\\\": { \\\"foo\\\": \\\"x\\\" } },\\n\" +\n \"} }\";\n- try {\n- parseQuery(queryString);\n- fail(\"Expected ParsingException\");\n- } catch (ParsingException e) {\n- assertThat(e.getMessage(), containsString(\"accepts only one 'filter' element\"));\n- }\n+ ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(queryString));\n+ assertThat(e.getMessage(), containsString(\"accepts only one 'filter' element\"));\n }\n \n /**\n@@ -93,12 +85,8 @@ public void testNoArrayAsFilterElements() throws IOException {\n \"\\\"filter\\\" : [ { \\\"term\\\": { \\\"foo\\\": \\\"a\\\" } },\\n\" +\n \"{ \\\"term\\\": { \\\"foo\\\": \\\"x\\\" } } ]\\n\" +\n \"} }\";\n- try {\n- parseQuery(queryString);\n- fail(\"Expected ParsingException\");\n- } catch (ParsingException e) {\n- assertThat(e.getMessage(), containsString(\"unexpected token [START_ARRAY]\"));\n- }\n+ ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(queryString));\n+ assertThat(e.getMessage(), containsString(\"unexpected 
token [START_ARRAY]\"));\n }\n \n public void testIllegalArguments() {", "filename": "core/src/test/java/org/elasticsearch/index/query/ConstantScoreQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -102,12 +102,7 @@ public void testInnerQueryEmptyException() throws IOException {\n \n public void testIllegalArguments() {\n DisMaxQueryBuilder disMaxQuery = new DisMaxQueryBuilder();\n- try {\n- disMaxQuery.add(null);\n- fail(\"cannot be null\");\n- } catch (IllegalArgumentException e) {\n- // expected\n- }\n+ expectThrows(IllegalArgumentException.class, () -> disMaxQuery.add(null));\n }\n \n public void testToQueryInnerPrefixQuery() throws Exception {", "filename": "core/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -56,24 +56,10 @@ protected void doAssertLuceneQuery(FieldMaskingSpanQueryBuilder queryBuilder, Qu\n }\n \n public void testIllegalArguments() {\n- try {\n- new FieldMaskingSpanQueryBuilder(null, \"maskedField\");\n- fail(\"must be non null\");\n- } catch (IllegalArgumentException e) {\n- // okay\n- }\n-\n- try {\n- SpanQueryBuilder span = new SpanTermQueryBuilder(\"name\", \"value\");\n- if (randomBoolean()) {\n- new FieldMaskingSpanQueryBuilder(span, null);\n- } else {\n- new FieldMaskingSpanQueryBuilder(span, \"\");\n- }\n- fail(\"must be non null\");\n- } catch (IllegalArgumentException e) {\n- // okay\n- }\n+ expectThrows(IllegalArgumentException.class, () -> new FieldMaskingSpanQueryBuilder(null, \"maskedField\"));\n+ SpanQueryBuilder span = new SpanTermQueryBuilder(\"name\", \"value\");\n+ expectThrows(IllegalArgumentException.class, () -> new FieldMaskingSpanQueryBuilder(span, null));\n+ expectThrows(IllegalArgumentException.class, () -> new FieldMaskingSpanQueryBuilder(span, \"\"));\n }\n \n public void testFromJson() throws IOException {\n@@ -93,10 +79,8 @@ public void testFromJson() throws IOException {\n \" \\\"_name\\\" : \\\"KPI\\\"\\n\" +\n \" }\\n\" +\n \"}\";\n-\n FieldMaskingSpanQueryBuilder parsed = (FieldMaskingSpanQueryBuilder) parseQuery(json);\n checkGeneratedJson(json, parsed);\n-\n assertEquals(json, 42.0, parsed.boost(), 0.00001);\n assertEquals(json, 0.23, parsed.innerQuery().boost(), 0.00001);\n }", "filename": "core/src/test/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -23,12 +23,15 @@\n import org.apache.lucene.search.BoostQuery;\n import org.apache.lucene.search.FuzzyQuery;\n import org.apache.lucene.search.Query;\n+import org.elasticsearch.common.ParsingException;\n import org.elasticsearch.common.unit.Fuzziness;\n import org.elasticsearch.test.AbstractQueryTestCase;\n-import org.hamcrest.Matchers;\n \n import java.io.IOException;\n+import java.util.HashMap;\n+import java.util.Map;\n \n+import static org.hamcrest.Matchers.containsString;\n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.instanceOf;\n \n@@ -55,47 +58,42 @@ protected FuzzyQueryBuilder doCreateTestQueryBuilder() {\n return query;\n }\n \n+ @Override\n+ protected Map<String, FuzzyQueryBuilder> getAlternateVersions() {\n+ Map<String, FuzzyQueryBuilder> alternateVersions = new HashMap<>();\n+ FuzzyQueryBuilder fuzzyQuery = new FuzzyQueryBuilder(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10));\n+ String contentString = \"{\\n\" +\n+ \" \\\"fuzzy\\\" : {\\n\" +\n+ \" \\\"\" + fuzzyQuery.fieldName() + \"\\\" : \\\"\" + fuzzyQuery.value() + \"\\\"\\n\" +\n+ \" }\\n\" +\n+ \"}\";\n+ 
alternateVersions.put(contentString, fuzzyQuery);\n+ return alternateVersions;\n+ }\n+\n @Override\n protected void doAssertLuceneQuery(FuzzyQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException {\n assertThat(query, instanceOf(FuzzyQuery.class));\n }\n \n public void testIllegalArguments() {\n- try {\n- new FuzzyQueryBuilder(null, \"text\");\n- fail(\"must not be null\");\n- } catch (IllegalArgumentException e) {\n- // expected\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new FuzzyQueryBuilder(null, \"text\"));\n+ assertEquals(\"field name cannot be null or empty\", e.getMessage());\n \n- try {\n- new FuzzyQueryBuilder(\"\", \"text\");\n- fail(\"must not be empty\");\n- } catch (IllegalArgumentException e) {\n- // expected\n- }\n+ e = expectThrows(IllegalArgumentException.class, () -> new FuzzyQueryBuilder(\"\", \"text\"));\n+ assertEquals(\"field name cannot be null or empty\", e.getMessage());\n \n- try {\n- new FuzzyQueryBuilder(\"field\", null);\n- fail(\"must not be null\");\n- } catch (IllegalArgumentException e) {\n- // expected\n- }\n+ e = expectThrows(IllegalArgumentException.class, () -> new FuzzyQueryBuilder(\"field\", null));\n+ assertEquals(\"query value cannot be null\", e.getMessage());\n }\n \n public void testUnsupportedFuzzinessForStringType() throws IOException {\n QueryShardContext context = createShardContext();\n context.setAllowUnmappedFields(true);\n-\n FuzzyQueryBuilder fuzzyQueryBuilder = new FuzzyQueryBuilder(STRING_FIELD_NAME, \"text\");\n fuzzyQueryBuilder.fuzziness(Fuzziness.build(randomFrom(\"a string which is not auto\", \"3h\", \"200s\")));\n-\n- try {\n- fuzzyQueryBuilder.toQuery(context);\n- fail(\"should have failed with NumberFormatException\");\n- } catch (NumberFormatException e) {\n- assertThat(e.getMessage(), Matchers.containsString(\"For input string\"));\n- }\n+ NumberFormatException e = expectThrows(NumberFormatException.class, () -> fuzzyQueryBuilder.toQuery(context));\n+ assertThat(e.getMessage(), containsString(\"For input string\"));\n }\n \n public void testToQueryWithStringField() throws IOException {\n@@ -119,7 +117,6 @@ public void testToQueryWithStringField() throws IOException {\n assertThat(fuzzyQuery.getTerm(), equalTo(new Term(STRING_FIELD_NAME, \"sh\")));\n assertThat(fuzzyQuery.getMaxEdits(), equalTo(Fuzziness.AUTO.asDistance(\"sh\")));\n assertThat(fuzzyQuery.getPrefixLength(), equalTo(1));\n-\n }\n \n public void testToQueryWithNumericField() throws IOException {\n@@ -157,4 +154,20 @@ public void testFromJson() throws IOException {\n assertEquals(json, 42.0, parsed.boost(), 0.00001);\n assertEquals(json, 2, parsed.fuzziness().asFloat(), 0f);\n }\n+\n+ public void testParseFailsWithMultipleFields() throws IOException {\n+ String json = \"{\\n\" +\n+ \" \\\"fuzzy\\\" : {\\n\" +\n+ \" \\\"message1\\\" : {\\n\" +\n+ \" \\\"value\\\" : \\\"this is a test\\\"\\n\" +\n+ \" },\\n\" +\n+ \" \\\"message2\\\" : {\\n\" +\n+ \" \\\"value\\\" : \\\"this is a test\\\"\\n\" +\n+ \" }\\n\" +\n+ \" }\\n\" +\n+ \"}\";\n+\n+ ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json));\n+ assertEquals(\"[fuzzy] query doesn't support multiple fields, found [message1] and [message2]\", e.getMessage());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/query/FuzzyQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -44,7 +44,6 @@\n import static org.hamcrest.CoreMatchers.notNullValue;\n import static 
org.hamcrest.Matchers.closeTo;\n import static org.hamcrest.Matchers.equalTo;\n-import static org.hamcrest.Matchers.is;\n \n public class GeoBoundingBoxQueryBuilderTests extends AbstractQueryTestCase<GeoBoundingBoxQueryBuilder> {\n /** Randomly generate either NaN or one of the two infinity values. */\n@@ -104,22 +103,14 @@ public void testValidationNullFieldname() {\n \n public void testValidationNullType() {\n GeoBoundingBoxQueryBuilder qb = new GeoBoundingBoxQueryBuilder(\"teststring\");\n- try {\n- qb.type((GeoExecType) null);\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"Type is not allowed to be null.\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> qb.type((GeoExecType) null));\n+ assertEquals(\"Type is not allowed to be null.\", e.getMessage());\n }\n \n public void testValidationNullTypeString() {\n GeoBoundingBoxQueryBuilder qb = new GeoBoundingBoxQueryBuilder(\"teststring\");\n- try {\n- qb.type((String) null);\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"cannot parse type from null string\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> qb.type((String) null));\n+ assertEquals(\"cannot parse type from null string\", e.getMessage());\n }\n \n @Override\n@@ -130,27 +121,17 @@ public void testToQuery() throws IOException {\n \n public void testExceptionOnMissingTypes() throws IOException {\n assumeTrue(\"test runs only when at least a type is registered\", getCurrentTypes().length == 0);\n- try {\n- super.testToQuery();\n- fail(\"Expected IllegalArgumentException\");\n- } catch (QueryShardException e) {\n- assertThat(e.getMessage(), is(\"failed to find geo_point field [mapped_geo_point]\"));\n- }\n+ QueryShardException e = expectThrows(QueryShardException.class, () -> super.testToQuery());\n+ assertEquals(\"failed to find geo_point field [mapped_geo_point]\", e.getMessage());\n }\n \n public void testBrokenCoordinateCannotBeSet() {\n PointTester[] testers = { new TopTester(), new LeftTester(), new BottomTester(), new RightTester() };\n-\n GeoBoundingBoxQueryBuilder builder = createTestQueryBuilder();\n builder.setValidationMethod(GeoValidationMethod.STRICT);\n \n for (PointTester tester : testers) {\n- try {\n- tester.invalidateCoordinate(builder, true);\n- fail(\"expected exception for broken \" + tester.getClass().getName() + \" coordinate\");\n- } catch (IllegalArgumentException e) {\n- // expected\n- }\n+ expectThrows(IllegalArgumentException.class, () -> tester.invalidateCoordinate(builder, true));\n }\n }\n \n@@ -215,12 +196,9 @@ public void testTopBottomCannotBeFlipped() {\n \n assumeTrue(\"top should not be equal to bottom for flip check\", top != bottom);\n logger.info(\"top: {} bottom: {}\", top, bottom);\n- try {\n- builder.setValidationMethod(GeoValidationMethod.STRICT).setCorners(bottom, left, top, right);\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), containsString(\"top is below bottom corner:\"));\n- }\n+ builder.setValidationMethod(GeoValidationMethod.STRICT);\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder.setCorners(bottom, left, top, right));\n+ assertThat(e.getMessage(), containsString(\"top is below bottom corner:\"));\n }\n \n public void testTopBottomCanBeFlippedOnIgnoreMalformed() {\n@@ -482,7 
+460,7 @@ public void testFromJson() throws IOException {\n assertEquals(json, 40.01, parsed.bottomRight().getLat(), 0.0001);\n assertEquals(json, 1.0, parsed.boost(), 0.0001);\n assertEquals(json, GeoExecType.MEMORY, parsed.type());\n- json =\n+ String deprecatedJson =\n \"{\\n\" +\n \" \\\"geo_bbox\\\" : {\\n\" +\n \" \\\"pin.location\\\" : {\\n\" +\n@@ -498,12 +476,8 @@ public void testFromJson() throws IOException {\n QueryBuilder parsedGeoBboxShortcut = parseQuery(json, ParseFieldMatcher.EMPTY);\n assertThat(parsedGeoBboxShortcut, equalTo(parsed));\n \n- try {\n- parseQuery(json);\n- fail(\"parse query should have failed in strict mode\");\n- } catch(IllegalArgumentException e) {\n- assertThat(e.getMessage(), equalTo(\"Deprecated field [geo_bbox] used, expected [geo_bounding_box] instead\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parseQuery(deprecatedJson));\n+ assertEquals(\"Deprecated field [geo_bbox] used, expected [geo_bounding_box] instead\", e.getMessage());\n }\n \n public void testFromJsonCoerceFails() throws IOException {", "filename": "core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -23,6 +23,7 @@\n import org.apache.lucene.search.Query;\n import org.apache.lucene.spatial.geopoint.search.GeoPointDistanceQuery;\n import org.elasticsearch.Version;\n+import org.elasticsearch.common.ParsingException;\n import org.elasticsearch.common.geo.GeoDistance;\n import org.elasticsearch.common.geo.GeoPoint;\n import org.elasticsearch.common.geo.GeoUtils;\n@@ -85,82 +86,41 @@ protected GeoDistanceQueryBuilder doCreateTestQueryBuilder() {\n }\n \n public void testIllegalValues() {\n- try {\n- if (randomBoolean()) {\n- new GeoDistanceQueryBuilder(\"\");\n- } else {\n- new GeoDistanceQueryBuilder((String) null);\n- }\n- fail(\"must not be null or empty\");\n- } catch (IllegalArgumentException ex) {\n- assertThat(ex.getMessage(), equalTo(\"fieldName must not be null or empty\"));\n- }\n-\n- GeoDistanceQueryBuilder query = new GeoDistanceQueryBuilder(\"fieldName\");\n- try {\n- if (randomBoolean()) {\n- query.distance(\"\");\n- } else {\n- query.distance(null);\n- }\n- fail(\"must not be null or empty\");\n- } catch (IllegalArgumentException ex) {\n- assertThat(ex.getMessage(), equalTo(\"distance must not be null or empty\"));\n- }\n-\n- try {\n- if (randomBoolean()) {\n- query.distance(\"\", DistanceUnit.DEFAULT);\n- } else {\n- query.distance(null, DistanceUnit.DEFAULT);\n- }\n- fail(\"distance must not be null or empty\");\n- } catch (IllegalArgumentException ex) {\n- assertThat(ex.getMessage(), equalTo(\"distance must not be null or empty\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new GeoDistanceQueryBuilder(\"\"));\n+ assertEquals(\"fieldName must not be null or empty\", e.getMessage());\n \n- try {\n- if (randomBoolean()) {\n- query.distance(\"1\", null);\n- } else {\n- query.distance(1, null);\n- }\n- fail(\"distance must not be null\");\n- } catch (IllegalArgumentException ex) {\n- assertThat(ex.getMessage(), equalTo(\"distance unit must not be null\"));\n- }\n-\n- try {\n- query.distance(randomIntBetween(Integer.MIN_VALUE, 0), DistanceUnit.DEFAULT);\n- fail(\"distance must be greater than zero\");\n- } catch (IllegalArgumentException ex) {\n- assertThat(ex.getMessage(), equalTo(\"distance must be greater than zero\"));\n- }\n-\n- try {\n- if (randomBoolean()) {\n- query.geohash(null);\n- } else {\n- 
query.geohash(\"\");\n- }\n- fail(\"geohash must not be null\");\n- } catch (IllegalArgumentException ex) {\n- assertThat(ex.getMessage(), equalTo(\"geohash must not be null or empty\"));\n- }\n+ e = expectThrows(IllegalArgumentException.class, () -> new GeoDistanceQueryBuilder((String) null));\n+ assertEquals(\"fieldName must not be null or empty\", e.getMessage());\n \n- try {\n- query.geoDistance(null);\n- fail(\"geodistance must not be null\");\n- } catch (IllegalArgumentException ex) {\n- assertThat(ex.getMessage(), equalTo(\"geoDistance must not be null\"));\n- }\n-\n- try {\n- query.optimizeBbox(null);\n- fail(\"optimizeBbox must not be null\");\n- } catch (IllegalArgumentException ex) {\n- assertThat(ex.getMessage(), equalTo(\"optimizeBbox must not be null\"));\n- }\n+ GeoDistanceQueryBuilder query = new GeoDistanceQueryBuilder(\"fieldName\");\n+ e = expectThrows(IllegalArgumentException.class, () -> query.distance(\"\"));\n+ assertEquals(\"distance must not be null or empty\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class, () -> query.distance(null));\n+ assertEquals(\"distance must not be null or empty\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class, () -> query.distance(\"\", DistanceUnit.DEFAULT));\n+ assertEquals(\"distance must not be null or empty\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class, () -> query.distance(null, DistanceUnit.DEFAULT));\n+ assertEquals(\"distance must not be null or empty\", e.getMessage());\n+\n+ e = expectThrows(IllegalArgumentException.class, () -> query.distance(\"1\", null));\n+ assertEquals(\"distance unit must not be null\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class, () -> query.distance(1, null));\n+ assertEquals(\"distance unit must not be null\", e.getMessage());\n+\n+ e = expectThrows(IllegalArgumentException.class, () -> query.distance(\n+ randomIntBetween(Integer.MIN_VALUE, 0), DistanceUnit.DEFAULT));\n+ assertEquals(\"distance must be greater than zero\", e.getMessage());\n+\n+ e = expectThrows(IllegalArgumentException.class, () -> query.geohash(null));\n+ assertEquals(\"geohash must not be null or empty\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class, () -> query.geohash(\"\"));\n+ assertEquals(\"geohash must not be null or empty\", e.getMessage());\n+\n+ e = expectThrows(IllegalArgumentException.class, () -> query.geoDistance(null));\n+ assertEquals(\"geoDistance must not be null\", e.getMessage());\n+\n+ e = expectThrows(IllegalArgumentException.class, () -> query.optimizeBbox(null));\n+ assertEquals(\"optimizeBbox must not be null\", e.getMessage());\n }\n \n /**\n@@ -474,4 +434,19 @@ public void testIgnoreUnmapped() throws IOException {\n QueryShardException e = expectThrows(QueryShardException.class, () -> failingQueryBuilder.toQuery(shardContext));\n assertThat(e.getMessage(), containsString(\"failed to find geo_point field [unmapped]\"));\n }\n+\n+ public void testParseFailsWithMultipleFields() throws IOException {\n+ String json = \"{\\n\" +\n+ \" \\\"geo_distance\\\" : {\\n\" +\n+ \" \\\"point1\\\" : {\\n\" +\n+ \" \\\"lat\\\" : 30, \\\"lon\\\" : 12\\n\" +\n+ \" },\\n\" +\n+ \" \\\"point2\\\" : {\\n\" +\n+ \" \\\"lat\\\" : 30, \\\"lon\\\" : 12\\n\" +\n+ \" }\\n\" +\n+ \" }\\n\" +\n+ \"}\";\n+ ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json));\n+ assertEquals(\"[geo_distance] query doesn't support multiple fields, found [point1] and [point2]\", e.getMessage());\n+ }\n }", 
"filename": "core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -41,7 +41,6 @@\n import static org.hamcrest.CoreMatchers.notNullValue;\n import static org.hamcrest.Matchers.closeTo;\n import static org.hamcrest.Matchers.equalTo;\n-import static org.hamcrest.Matchers.is;\n \n public class GeoDistanceRangeQueryTests extends AbstractQueryTestCase<GeoDistanceRangeQueryBuilder> {\n \n@@ -213,96 +212,57 @@ public void testToQuery() throws IOException {\n }\n \n public void testNullFieldName() {\n- try {\n- if (randomBoolean()) {\n- new GeoDistanceRangeQueryBuilder(null, new GeoPoint());\n- } else {\n- new GeoDistanceRangeQueryBuilder(\"\", new GeoPoint());\n- }\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"fieldName must not be null\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> new GeoDistanceRangeQueryBuilder(null, new GeoPoint()));\n+ assertEquals(\"fieldName must not be null\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class,\n+ () -> new GeoDistanceRangeQueryBuilder(\"\", new GeoPoint()));\n+ assertEquals(\"fieldName must not be null\", e.getMessage());\n }\n \n public void testNoPoint() {\n- try {\n- if (randomBoolean()) {\n- new GeoDistanceRangeQueryBuilder(GEO_POINT_FIELD_NAME, (GeoPoint) null);\n- } else {\n- new GeoDistanceRangeQueryBuilder(GEO_POINT_FIELD_NAME, (String) null);\n- }\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"point must not be null\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> new GeoDistanceRangeQueryBuilder(GEO_POINT_FIELD_NAME, (GeoPoint) null));\n+ assertEquals(\"point must not be null\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class,\n+ () -> new GeoDistanceRangeQueryBuilder(GEO_POINT_FIELD_NAME, (String) null));\n+ assertEquals(\"point must not be null\", e.getMessage());\n }\n \n public void testInvalidFrom() {\n GeoDistanceRangeQueryBuilder builder = new GeoDistanceRangeQueryBuilder(GEO_POINT_FIELD_NAME, new GeoPoint());\n- try {\n- if (randomBoolean()) {\n- builder.from((String) null);\n- } else {\n- builder.from((Number) null);\n- }\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"[from] must not be null\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder.from((String) null));\n+ assertEquals(\"[from] must not be null\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class, () -> builder.from((Number) null));\n+ assertEquals(\"[from] must not be null\", e.getMessage());\n }\n \n public void testInvalidTo() {\n GeoDistanceRangeQueryBuilder builder = new GeoDistanceRangeQueryBuilder(GEO_POINT_FIELD_NAME, new GeoPoint());\n- try {\n- if (randomBoolean()) {\n- builder.to((String) null);\n- } else {\n- builder.to((Number) null);\n- }\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"[to] must not be null\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder.to((String) null));\n+ assertEquals(\"[to] must not be null\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class, () -> builder.to((Number) null));\n+ 
assertEquals(\"[to] must not be null\", e.getMessage());\n }\n \n public void testInvalidOptimizeBBox() {\n GeoDistanceRangeQueryBuilder builder = new GeoDistanceRangeQueryBuilder(GEO_POINT_FIELD_NAME, new GeoPoint());\n- if (randomBoolean()) {\n- try {\n- builder.optimizeBbox(null);\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"optimizeBbox must not be null\"));\n- }\n- } else {\n- try {\n- builder.optimizeBbox(\"foo\");\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"optimizeBbox must be one of [none, memory, indexed]\"));\n- }\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder.optimizeBbox(null));\n+ assertEquals(\"optimizeBbox must not be null\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class, () -> builder.optimizeBbox(\"foo\"));\n+ assertEquals(\"optimizeBbox must be one of [none, memory, indexed]\", e.getMessage());\n }\n \n public void testInvalidGeoDistance() {\n GeoDistanceRangeQueryBuilder builder = new GeoDistanceRangeQueryBuilder(GEO_POINT_FIELD_NAME, new GeoPoint());\n- try {\n- builder.geoDistance(null);\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"geoDistance calculation mode must not be null\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder.geoDistance(null));\n+ assertEquals(\"geoDistance calculation mode must not be null\", e.getMessage());\n }\n \n public void testInvalidDistanceUnit() {\n GeoDistanceRangeQueryBuilder builder = new GeoDistanceRangeQueryBuilder(GEO_POINT_FIELD_NAME, new GeoPoint());\n- try {\n- builder.unit(null);\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"distance unit must not be null\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder.unit(null));\n+ assertEquals(\"distance unit must not be null\", e.getMessage());\n }\n \n public void testNestedRangeQuery() throws IOException {", "filename": "core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java", "status": "modified" }, { "diff": "@@ -20,7 +20,6 @@\n package org.elasticsearch.index.query;\n \n import com.vividsolutions.jts.geom.Coordinate;\n-\n import org.apache.lucene.search.MatchNoDocsQuery;\n import org.apache.lucene.search.Query;\n import org.apache.lucene.spatial.geopoint.search.GeoPointInPolygonQuery;\n@@ -39,6 +38,7 @@\n \n import java.io.IOException;\n import java.util.ArrayList;\n+import java.util.Collections;\n import java.util.List;\n \n import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;\n@@ -47,7 +47,6 @@\n import static org.hamcrest.CoreMatchers.notNullValue;\n import static org.hamcrest.Matchers.closeTo;\n import static org.hamcrest.Matchers.equalTo;\n-import static org.hamcrest.Matchers.is;\n \n public class GeoPolygonQueryBuilderTests extends AbstractQueryTestCase<GeoPolygonQueryBuilder> {\n @Override\n@@ -144,50 +143,36 @@ public List<GeoPoint> randomPolygon(int numPoints) {\n }\n \n public void testNullFieldName() {\n- try {\n- new GeoPolygonQueryBuilder(null, randomPolygon(5));\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"fieldName must not be null\"));\n- }\n+ 
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new GeoPolygonQueryBuilder(null, randomPolygon(5)));\n+ assertEquals(\"fieldName must not be null\", e.getMessage());\n }\n \n public void testEmptyPolygon() {\n- try {\n- if (randomBoolean()) {\n- new GeoPolygonQueryBuilder(GEO_POINT_FIELD_NAME, new ArrayList<GeoPoint>());\n- } else {\n- new GeoPolygonQueryBuilder(GEO_POINT_FIELD_NAME, null);\n- }\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"polygon must not be null or empty\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> new GeoPolygonQueryBuilder(GEO_POINT_FIELD_NAME, Collections.emptyList()));\n+ assertEquals(\"polygon must not be null or empty\", e.getMessage());\n+\n+ e = expectThrows(IllegalArgumentException.class, () -> new GeoPolygonQueryBuilder(GEO_POINT_FIELD_NAME, null));\n+ assertEquals(\"polygon must not be null or empty\", e.getMessage());\n }\n \n public void testInvalidClosedPolygon() {\n List<GeoPoint> points = new ArrayList<>();\n points.add(new GeoPoint(0, 90));\n points.add(new GeoPoint(90, 90));\n points.add(new GeoPoint(0, 90));\n- try {\n- new GeoPolygonQueryBuilder(GEO_POINT_FIELD_NAME, points);\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"too few points defined for geo_polygon query\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> new GeoPolygonQueryBuilder(GEO_POINT_FIELD_NAME, points));\n+ assertEquals(\"too few points defined for geo_polygon query\", e.getMessage());\n }\n \n public void testInvalidOpenPolygon() {\n List<GeoPoint> points = new ArrayList<>();\n points.add(new GeoPoint(0, 90));\n points.add(new GeoPoint(90, 90));\n- try {\n- new GeoPolygonQueryBuilder(GEO_POINT_FIELD_NAME, points);\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"too few points defined for geo_polygon query\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> new GeoPolygonQueryBuilder(GEO_POINT_FIELD_NAME, points));\n+ assertEquals(\"too few points defined for geo_polygon query\", e.getMessage());\n }\n \n public void testDeprecatedXContent() throws IOException {\n@@ -205,12 +190,8 @@ public void testDeprecatedXContent() throws IOException {\n builder.field(\"normalize\", true); // deprecated\n builder.endObject();\n builder.endObject();\n- try {\n- parseQuery(builder.string());\n- fail(\"normalize is deprecated\");\n- } catch (IllegalArgumentException ex) {\n- assertEquals(\"Deprecated field [normalize] used, replaced by [use validation_method instead]\", ex.getMessage());\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parseQuery(builder.string()));\n+ assertEquals(\"Deprecated field [normalize] used, replaced by [use validation_method instead]\", e.getMessage());\n }\n \n public void testParsingAndToQueryParsingExceptions() throws IOException {\n@@ -223,12 +204,7 @@ public void testParsingAndToQueryParsingExceptions() throws IOException {\n };\n for (String brokenFile : brokenFiles) {\n String query = copyToStringFromClasspath(brokenFile);\n- try {\n- parseQuery(query);\n- fail(\"parsing a broken geo_polygon filter didn't fail as expected while parsing: \" + brokenFile);\n- } catch (ParsingException e) {\n- // success!\n- }\n+ 
expectThrows(ParsingException.class, () -> parseQuery(query));\n }\n }\n ", "filename": "core/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -20,7 +20,6 @@\n package org.elasticsearch.index.query;\n \n import com.vividsolutions.jts.geom.Coordinate;\n-\n import org.apache.lucene.search.BooleanQuery;\n import org.apache.lucene.search.ConstantScoreQuery;\n import org.apache.lucene.search.MatchNoDocsQuery;\n@@ -50,7 +49,6 @@\n import static org.hamcrest.CoreMatchers.notNullValue;\n import static org.hamcrest.Matchers.anyOf;\n import static org.hamcrest.Matchers.equalTo;\n-import static org.hamcrest.Matchers.is;\n \n public class GeoShapeQueryBuilderTests extends AbstractQueryTestCase<GeoShapeQueryBuilder> {\n \n@@ -156,70 +154,44 @@ public void testToQuery() throws IOException {\n \n public void testNoFieldName() throws Exception {\n ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null);\n- try {\n- new GeoShapeQueryBuilder(null, shape);\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"fieldName is required\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new GeoShapeQueryBuilder(null, shape));\n+ assertEquals(\"fieldName is required\", e.getMessage());\n }\n \n public void testNoShape() throws IOException {\n- try {\n- new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, (ShapeBuilder) null);\n- fail(\"exception expected\");\n- } catch (IllegalArgumentException e) {\n- // expected\n- }\n+ expectThrows(IllegalArgumentException.class, () -> new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, null));\n }\n \n public void testNoIndexedShape() throws IOException {\n- try {\n- new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, null, \"type\");\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"either shapeBytes or indexedShapeId and indexedShapeType are required\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, null, \"type\"));\n+ assertEquals(\"either shapeBytes or indexedShapeId and indexedShapeType are required\", e.getMessage());\n }\n \n public void testNoIndexedShapeType() throws IOException {\n- try {\n- new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, \"id\", null);\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"indexedShapeType is required if indexedShapeId is specified\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, \"id\", null));\n+ assertEquals(\"indexedShapeType is required if indexedShapeId is specified\", e.getMessage());\n }\n \n public void testNoRelation() throws IOException {\n ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null);\n GeoShapeQueryBuilder builder = new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, shape);\n- try {\n- builder.relation(null);\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"No Shape Relation defined\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder.relation(null));\n+ assertEquals(\"No Shape Relation defined\", e.getMessage());\n }\n \n public void testInvalidRelation() throws 
IOException {\n ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null);\n GeoShapeQueryBuilder builder = new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, shape);\n- try {\n- builder.strategy(SpatialStrategy.TERM);\n- builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.WITHIN));\n- fail(\"Illegal combination of strategy and relation setting\");\n- } catch (IllegalArgumentException e) {\n- // okay\n- }\n-\n- try {\n- builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.WITHIN));\n- builder.strategy(SpatialStrategy.TERM);\n- fail(\"Illegal combination of strategy and relation setting\");\n- } catch (IllegalArgumentException e) {\n- // okay\n- }\n+ builder.strategy(SpatialStrategy.TERM);\n+ expectThrows(IllegalArgumentException.class, () -> builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.WITHIN)));\n+ GeoShapeQueryBuilder builder2 = new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, shape);\n+ builder2.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.WITHIN));\n+ expectThrows(IllegalArgumentException.class, () -> builder2.strategy(SpatialStrategy.TERM));\n+ GeoShapeQueryBuilder builder3 = new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, shape);\n+ builder3.strategy(SpatialStrategy.TERM);\n+ expectThrows(IllegalArgumentException.class, () -> builder3.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.WITHIN)));\n }\n \n // see #3878\n@@ -256,16 +228,15 @@ public void testMustRewrite() throws IOException {\n sqb = doCreateTestQueryBuilder();\n // do this until we get one without a shape\n } while (sqb.shape() != null);\n- try {\n- sqb.toQuery(createShardContext());\n- fail();\n- } catch (UnsupportedOperationException e) {\n- assertEquals(\"query must be rewritten first\", e.getMessage());\n- }\n- QueryBuilder rewrite = sqb.rewrite(createShardContext());\n+\n+ GeoShapeQueryBuilder query = sqb;\n+\n+ UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class, () -> query.toQuery(createShardContext()));\n+ assertEquals(\"query must be rewritten first\", e.getMessage());\n+ QueryBuilder rewrite = query.rewrite(createShardContext());\n GeoShapeQueryBuilder geoShapeQueryBuilder = new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, indexedShapeToReturn);\n- geoShapeQueryBuilder.strategy(sqb.strategy());\n- geoShapeQueryBuilder.relation(sqb.relation());\n+ geoShapeQueryBuilder.strategy(query.strategy());\n+ geoShapeQueryBuilder.relation(query.relation());\n assertEquals(geoShapeQueryBuilder, rewrite);\n }\n ", "filename": "core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -39,7 +39,6 @@\n import static org.hamcrest.CoreMatchers.instanceOf;\n import static org.hamcrest.CoreMatchers.notNullValue;\n import static org.hamcrest.Matchers.equalTo;\n-import static org.hamcrest.Matchers.is;\n \n public class GeohashCellQueryBuilderTests extends AbstractQueryTestCase<Builder> {\n \n@@ -92,39 +91,23 @@ public void testToQuery() throws IOException {\n }\n \n public void testNullField() {\n- try {\n- if (randomBoolean()) {\n- new Builder(null, new GeoPoint());\n- } else {\n- new Builder(\"\", new GeoPoint());\n- }\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"fieldName must not be null\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new Builder(null, new GeoPoint()));\n+ assertEquals(\"fieldName must not be null\", e.getMessage());\n+ 
e = expectThrows(IllegalArgumentException.class, () -> new Builder(\"\", new GeoPoint()));\n+ assertEquals(\"fieldName must not be null\", e.getMessage());\n }\n \n public void testNullGeoPoint() {\n- try {\n- if (randomBoolean()) {\n- new Builder(GEO_POINT_FIELD_NAME, (GeoPoint) null);\n- } else {\n- new Builder(GEO_POINT_FIELD_NAME, \"\");\n- }\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"geohash or point must be defined\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new Builder(GEO_POINT_FIELD_NAME, (GeoPoint) null));\n+ assertEquals(\"geohash or point must be defined\", e.getMessage());\n+ e = expectThrows(IllegalArgumentException.class, () -> new Builder(GEO_POINT_FIELD_NAME, \"\"));\n+ assertEquals(\"geohash or point must be defined\", e.getMessage());\n }\n \n public void testInvalidPrecision() {\n GeohashCellQuery.Builder builder = new Builder(GEO_POINT_FIELD_NAME, new GeoPoint());\n- try {\n- builder.precision(-1);\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), containsString(\"precision must be greater than 0\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder.precision(-1));\n+ assertThat(e.getMessage(), containsString(\"precision must be greater than 0\"));\n }\n \n public void testLocationParsing() throws IOException {", "filename": "core/src/test/java/org/elasticsearch/index/query/GeohashCellQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -21,7 +21,6 @@\n \n import com.carrotsearch.randomizedtesting.generators.RandomPicks;\n import com.fasterxml.jackson.core.JsonParseException;\n-\n import org.apache.lucene.queries.TermsQuery;\n import org.apache.lucene.search.BooleanClause;\n import org.apache.lucene.search.BooleanQuery;\n@@ -63,7 +62,6 @@\n import static org.hamcrest.CoreMatchers.instanceOf;\n import static org.hamcrest.CoreMatchers.notNullValue;\n import static org.hamcrest.CoreMatchers.startsWith;\n-import static org.hamcrest.Matchers.is;\n \n public class HasChildQueryBuilderTests extends AbstractQueryTestCase<HasChildQueryBuilder> {\n protected static final String PARENT_TYPE = \"parent\";\n@@ -367,24 +365,17 @@ public void testNoneFromString() {\n * Should throw {@link IllegalArgumentException} instead of NPE.\n */\n public void testThatNullFromStringThrowsException() {\n- try {\n- HasChildQueryBuilder.parseScoreMode(null);\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"No score mode for child query [null] found\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> HasChildQueryBuilder.parseScoreMode(null));\n+ assertEquals(\"No score mode for child query [null] found\", e.getMessage());\n }\n \n /**\n * Failure should not change (and the value should never match anything...).\n */\n public void testThatUnrecognizedFromStringThrowsException() {\n- try {\n- HasChildQueryBuilder.parseScoreMode(\"unrecognized value\");\n- fail(\"Expected IllegalArgumentException\");\n- } catch (IllegalArgumentException e) {\n- assertThat(e.getMessage(), is(\"No score mode for child query [unrecognized value] found\"));\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,\n+ () -> HasChildQueryBuilder.parseScoreMode(\"unrecognized value\"));\n+ assertEquals(\"No score mode for child query 
[unrecognized value] found\", e.getMessage());\n }\n \n public void testIgnoreUnmapped() throws IOException {", "filename": "core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -157,12 +157,8 @@ public void testDeprecatedXContent() throws IOException {\n builder.field(\"type\", \"foo\"); // deprecated\n builder.endObject();\n builder.endObject();\n- try {\n- parseQuery(builder.string());\n- fail(\"type is deprecated\");\n- } catch (IllegalArgumentException ex) {\n- assertEquals(\"Deprecated field [type] used, expected [parent_type] instead\", ex.getMessage());\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parseQuery(builder.string()));\n+ assertEquals(\"Deprecated field [type] used, expected [parent_type] instead\", e.getMessage());\n \n HasParentQueryBuilder queryBuilder = (HasParentQueryBuilder) parseQuery(builder.string(), ParseFieldMatcher.EMPTY);\n assertEquals(\"foo\", queryBuilder.type());", "filename": "core/src/test/java/org/elasticsearch/index/query/HasParentQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -32,20 +32,15 @@\n \n import static org.hamcrest.CoreMatchers.instanceOf;\n import static org.hamcrest.Matchers.containsString;\n-import static org.hamcrest.Matchers.is;\n \n public class IdsQueryBuilderTests extends AbstractQueryTestCase<IdsQueryBuilder> {\n /**\n * Check that parser throws exception on missing values field.\n */\n public void testIdsNotProvided() throws IOException {\n String noIdsFieldQuery = \"{\\\"ids\\\" : { \\\"type\\\" : \\\"my_type\\\" }\";\n- try {\n- parseQuery(noIdsFieldQuery);\n- fail(\"Expected ParsingException\");\n- } catch (ParsingException e) {\n- assertThat(e.getMessage(), containsString(\"no ids values provided\"));\n- }\n+ ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(noIdsFieldQuery));\n+ assertThat(e.getMessage(), containsString(\"no ids values provided\"));\n }\n \n @Override\n@@ -94,30 +89,19 @@ protected void doAssertLuceneQuery(IdsQueryBuilder queryBuilder, Query query, Qu\n }\n \n public void testIllegalArguments() {\n- try {\n- new IdsQueryBuilder((String[])null);\n- fail(\"must be not null\");\n- } catch(IllegalArgumentException e) {\n- //all good\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new IdsQueryBuilder((String[]) null));\n+ assertEquals(\"[ids] types cannot be null\", e.getMessage());\n \n- try {\n- new IdsQueryBuilder().addIds((String[])null);\n- fail(\"must be not null\");\n- } catch(IllegalArgumentException e) {\n- //all good\n- }\n+ IdsQueryBuilder idsQueryBuilder = new IdsQueryBuilder();\n+ e = expectThrows(IllegalArgumentException.class, () -> idsQueryBuilder.addIds((String[])null));\n+ assertEquals(\"[ids] ids cannot be null\", e.getMessage());\n }\n \n // see #7686.\n public void testIdsQueryWithInvalidValues() throws Exception {\n String query = \"{ \\\"ids\\\": { \\\"values\\\": [[1]] } }\";\n- try {\n- parseQuery(query);\n- fail(\"Expected ParsingException\");\n- } catch (ParsingException e) {\n- assertThat(e.getMessage(), is(\"Illegal value for id, expecting a string or number, got: START_ARRAY\"));\n- }\n+ ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(query));\n+ assertEquals(\"Illegal value for id, expecting a string or number, got: START_ARRAY\", e.getMessage());\n }\n \n public void testFromJson() throws IOException {\n@@ -143,7 +127,7 @@ public void testFromJsonDeprecatedSyntax() 
throws IOException {\n IdsQueryBuilder testQuery = new IdsQueryBuilder(type);\n \n //single value type can also be called _type\n- String contentString = \"{\\n\" +\n+ final String contentString = \"{\\n\" +\n \" \\\"ids\\\" : {\\n\" +\n \" \\\"_type\\\" : \\\"\" + type + \"\\\",\\n\" +\n \" \\\"values\\\" : []\\n\" +\n@@ -153,27 +137,20 @@ public void testFromJsonDeprecatedSyntax() throws IOException {\n IdsQueryBuilder parsed = (IdsQueryBuilder) parseQuery(contentString, ParseFieldMatcher.EMPTY);\n assertEquals(testQuery, parsed);\n \n- try {\n- parseQuery(contentString);\n- fail(\"parse should have failed\");\n- } catch(IllegalArgumentException e) {\n- assertEquals(\"Deprecated field [_type] used, expected [type] instead\", e.getMessage());\n- }\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parseQuery(contentString));\n+ assertEquals(\"Deprecated field [_type] used, expected [type] instead\", e.getMessage());\n \n //array of types can also be called type rather than types\n- contentString = \"{\\n\" +\n+ final String contentString2 = \"{\\n\" +\n \" \\\"ids\\\" : {\\n\" +\n \" \\\"types\\\" : [\\\"\" + type + \"\\\"],\\n\" +\n \" \\\"values\\\" : []\\n\" +\n \" }\\n\" +\n \"}\";\n parsed = (IdsQueryBuilder) parseQuery(contentString, ParseFieldMatcher.EMPTY);\n assertEquals(testQuery, parsed);\n- try {\n- parseQuery(contentString);\n- fail(\"parse should have failed\");\n- } catch(IllegalArgumentException e) {\n- assertEquals(\"Deprecated field [types] used, expected [type] instead\", e.getMessage());\n- }\n+\n+ e = expectThrows(IllegalArgumentException.class, () -> parseQuery(contentString2));\n+ assertEquals(\"Deprecated field [types] used, expected [type] instead\", e.getMessage());\n }\n }", "filename": "core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java", "status": "modified" } ] }
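The test diffs in the record above all make the same change: the verbose `try { ...; fail(...); } catch (SomeException e) { ... }` idiom is replaced with a single `expectThrows(...)` call that runs a lambda, verifies the exception type, and hands the exception back so its message can be asserted on directly. For readers who have not seen that helper, the sketch below approximates its shape as a stand-alone class; the class name, the `ThrowingRunnable` interface, and the example lambda are illustrative assumptions, not the Lucene/Elasticsearch test framework's actual code.

```java
// Minimal stand-alone sketch of the expectThrows idiom used throughout the diffs above.
// The real helper lives in the Lucene/Elasticsearch test framework; this approximation
// only illustrates the shape of the API, it is not the framework's implementation.
public final class ExpectThrowsSketch {

    /** A block of test code that is allowed to throw anything. */
    @FunctionalInterface
    public interface ThrowingRunnable {
        void run() throws Throwable;
    }

    /**
     * Runs the block, asserts that it throws an exception of the expected type,
     * and returns that exception so the caller can assert on its message.
     */
    public static <T extends Throwable> T expectThrows(Class<T> expectedType, ThrowingRunnable runnable) {
        try {
            runnable.run();
        } catch (Throwable t) {
            if (expectedType.isInstance(t)) {
                return expectedType.cast(t);
            }
            throw new AssertionError("unexpected exception type: " + t.getClass(), t);
        }
        throw new AssertionError("expected " + expectedType.getSimpleName() + " but the block completed normally");
    }

    public static void main(String[] args) {
        // Before: try { ...; fail("must not be null"); } catch (IllegalArgumentException e) { /* expected */ }
        // After: one call that also returns the exception for a message assertion.
        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
                () -> { throw new IllegalArgumentException("field name cannot be null or empty"); });
        System.out.println("caught as expected: " + e.getMessage());
    }
}
```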
{ "body": "If you run:\n\n``` http\nGET /_cat/tasks?detailed\n```\n\nthen it will consistently fail with:\n\n``` json\n{\"error\":{\"root_cause\":[{\"type\":\"illegal_state_exception\",\"reason\":\"mismatch on number of cells 14 in a row compared to header 15\"}],\"type\":\"illegal_state_exception\",\"reason\":\"mismatch on number of cells 14 in a row compared to header 15\"},\"status\":500}\n```\n\nThe raw exception is:\n\n```\n[2016-08-02 13:30:38,889][WARN ][rest.suppressed ] path: /_cat/tasks, params: {detailed=}\njava.lang.IllegalStateException: mismatch on number of cells 14 in a row compared to header 15\n at org.elasticsearch.common.Table.endRow(Table.java:113)\n at org.elasticsearch.common.Table.endRow(Table.java:121)\n at org.elasticsearch.rest.action.cat.RestTasksAction.buildRow(RestTasksAction.java:142)\n at org.elasticsearch.rest.action.cat.RestTasksAction.buildGroups(RestTasksAction.java:150)\n at org.elasticsearch.rest.action.cat.RestTasksAction.buildTable(RestTasksAction.java:159)\n at org.elasticsearch.rest.action.cat.RestTasksAction.access$000(RestTasksAction.java:50)\n at org.elasticsearch.rest.action.cat.RestTasksAction$1.buildResponse(RestTasksAction.java:70)\n at org.elasticsearch.rest.action.cat.RestTasksAction$1.buildResponse(RestTasksAction.java:67)\n at org.elasticsearch.rest.action.support.RestResponseListener.processResponse(RestResponseListener.java:37)\n at org.elasticsearch.rest.action.support.RestActionListener.onResponse(RestActionListener.java:47)\n at org.elasticsearch.action.support.TransportAction$1.onResponse(TransportAction.java:91)\n at org.elasticsearch.action.support.TransportAction$1.onResponse(TransportAction.java:87)\n at org.elasticsearch.xpack.security.action.filter.SecurityActionFilter$SigningListener.onResponse(SecurityActionFilter.java:237)\n at org.elasticsearch.xpack.security.action.filter.SecurityActionFilter$SigningListener.onResponse(SecurityActionFilter.java:216)\n at org.elasticsearch.action.support.TransportAction$ResponseFilterChain.proceed(TransportAction.java:224)\n at org.elasticsearch.xpack.security.action.filter.SecurityActionFilter.apply(SecurityActionFilter.java:129)\n at org.elasticsearch.action.support.TransportAction$ResponseFilterChain.proceed(TransportAction.java:222)\n at org.elasticsearch.action.ingest.IngestActionFilter.apply(IngestActionFilter.java:87)\n at org.elasticsearch.action.support.TransportAction$ResponseFilterChain.proceed(TransportAction.java:222)\n at org.elasticsearch.action.support.TransportAction$FilteredActionListener.onResponse(TransportAction.java:249)\n at org.elasticsearch.action.support.TransportAction$FilteredActionListener.onResponse(TransportAction.java:235)\n at org.elasticsearch.action.support.tasks.TransportTasksAction$AsyncAction.finishHim(TransportTasksAction.java:297)\n at org.elasticsearch.action.support.tasks.TransportTasksAction$AsyncAction.onOperation(TransportTasksAction.java:272)\n at org.elasticsearch.action.support.tasks.TransportTasksAction$AsyncAction.access$500(TransportTasksAction.java:188)\n at org.elasticsearch.action.support.tasks.TransportTasksAction$AsyncAction$1.handleResponse(TransportTasksAction.java:248)\n at org.elasticsearch.action.support.tasks.TransportTasksAction$AsyncAction$1.handleResponse(TransportTasksAction.java:240)\n at org.elasticsearch.transport.TransportService$ContextRestoreResponseHandler.handleResponse(TransportService.java:928)\n at org.elasticsearch.transport.TcpTransport$1.doRun(TcpTransport.java:1245)\n at 
org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\n at org.elasticsearch.threadpool.ThreadPool.lambda$static$154(ThreadPool.java:145)\n at org.elasticsearch.threadpool.ThreadPool$$Lambda$167/1800693087.execute(Unknown Source)\n at org.elasticsearch.transport.TcpTransport.handleResponse(TcpTransport.java:1237)\n at org.elasticsearch.transport.TcpTransport.messageReceived(TcpTransport.java:1206)\n at org.elasticsearch.transport.netty3.Netty3MessageChannelHandler.messageReceived(Netty3MessageChannelHandler.java:73)\n at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70)\n at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)\n at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)\n at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296)\n at org.jboss.netty.handler.codec.frame.FrameDecoder.unfoldAndFireMessageReceived(FrameDecoder.java:462)\n at org.jboss.netty.handler.codec.frame.FrameDecoder.callDecode(FrameDecoder.java:443)\n at org.jboss.netty.handler.codec.frame.FrameDecoder.messageReceived(FrameDecoder.java:303)\n at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70)\n at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)\n at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559)\n at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:268)\n at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:255)\n at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:88)\n at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108)\n at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337)\n at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89)\n at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178)\n at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108)\n at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\n```\n", "comments": [], "number": 19755, "title": "\"_cat/tasks?detailed\" fails consistently" }
{ "body": "Currently the cat tasks operation fails in the detailed mode.\n\nCloses #19755\n", "number": 19759, "review_comments": [], "title": "Fixes cat tasks operation in detailed mode" }
{ "commits": [ { "message": "Fixes cat tasks operation in detailed mode\n\nCurrently the cat tasks operation fails in the detailed mode.\n\nCloses #19755" } ], "files": [ { "diff": "@@ -98,7 +98,7 @@ protected Table getTableWithHeader(final RestRequest request) {\n \n // Task detailed info\n if (detailed) {\n- table.addCell(\"description\", \"default:false;alias:desc;desc:task action\");\n+ table.addCell(\"description\", \"default:true;alias:desc;desc:task action\");\n }\n table.endHeaders();\n return table;\n@@ -142,7 +142,7 @@ private void buildRow(Table table, boolean fullId, boolean detailed, DiscoveryNo\n table.endRow();\n }\n \n- private void buildGroups(Table table, boolean detailed, boolean fullId, List<TaskGroup> taskGroups) {\n+ private void buildGroups(Table table, boolean fullId, boolean detailed, List<TaskGroup> taskGroups) {\n DiscoveryNodes discoveryNodes = clusterService.state().nodes();\n List<TaskGroup> sortedGroups = new ArrayList<>(taskGroups);\n sortedGroups.sort((o1, o2) -> Long.compare(o1.getTaskInfo().getStartTime(), o2.getTaskInfo().getStartTime()));", "filename": "core/src/main/java/org/elasticsearch/rest/action/cat/RestTasksAction.java", "status": "modified" }, { "diff": "@@ -7,4 +7,13 @@\n - match:\n $body: |\n / # action task_id parent_task_id type start_time timestamp running_time ip node\n- ^( \\S+\\s+ \\S+\\:\\d+\\s+ (?:\\-|\\S+\\:\\d+)\\s+ \\S+\\s+ \\d+\\s+ \\d\\d\\:\\d\\d\\:\\d\\d\\s+ \\S+\\s+ \\d{1,3}.\\d{1,3}.\\d{1,3}.\\d{1,3}\\s+ \\S+(?:\\s\\S+)*\\n)+$/\n+ ^( \\S+\\s+ \\S+\\:\\d+\\s+ (?:\\-|\\S+\\:\\d+)\\s+ \\S+\\s+ \\d+\\s+ \\d\\d\\:\\d\\d\\:\\d\\d\\s+ \\S+\\s+ \\d{1,3}.\\d{1,3}.\\d{1,3}.\\d{1,3}\\s+ \\S+\\n)+$/\n+\n+ - do:\n+ cat.tasks:\n+ detailed: true\n+\n+ - match:\n+ $body: |\n+ / # action task_id parent_task_id type start_time timestamp running_time ip node description\n+ ^( \\S+\\s+ \\S+\\:\\d+\\s+ (?:\\-|\\S+\\:\\d+)\\s+ \\S+\\s+ \\d+\\s+ \\d\\d\\:\\d\\d\\:\\d\\d\\s+ \\S+\\s+ \\d{1,3}.\\d{1,3}.\\d{1,3}.\\d{1,3}\\s+ \\S+\\s+ .*\\n)+$/", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/cat.tasks/10_basic.yaml", "status": "modified" } ] }
{ "body": "The two write methods are defined like this:\n\n```\n public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {\n final Path file = path.resolve(blobName);\n try (OutputStream outputStream = Files.newOutputStream(file)) {\n ...\n```\n\nPassing nothing to Files.newOutputStream will truncate the output file if its already there:\n\n```\nIf no options are present then this method works as if the CREATE, TRUNCATE_EXISTING, and WRITE options are present. In other words, it opens the file for writing, creating the file if it doesn't exist, or initially truncating an existing regular-file to a size of 0 if it exists. \n```\n\nCan we please pass `StandardOpenOptions.CREATE_NEW` so that silent data truncation never happens? \n", "comments": [ { "body": "> Can we please pass StandardOpenOptions.CREATE_NEW so that silent data truncation never happens?\n\n+100\n", "created_at": "2015-12-21T20:13:24Z" } ], "number": 15579, "title": "FSBlobContainer leniently truncates" }
{ "body": "Enables implementations of the BlobContainer interface to\nconform with the requirements of the writeBlob method by\nthrowing a FileAlreadyExistsException if attempting to write\nto a blob that already exists. This change means implementations\nof BlobContainer should never overwrite blobs - to overwrite a\nblob, it must first be deleted and then can be written again.\n\nCloses #15579\n", "number": 19749, "review_comments": [], "title": "BlobContainer#writeBlob no longer can overwrite a blob" }
{ "commits": [ { "message": "Enables implementations of the BlobContainer interface to\nconform with the requirements of the writeBlob method by\nthrowing a FileAlreadyExistsException if attempting to write\nto a blob that already exists. This change means implementations\nof BlobContainer should never overwrite blobs - to overwrite a\nblob, it must first be deleted and then can be written again.\n\nCloses #15579" } ], "files": [ { "diff": "@@ -21,6 +21,7 @@\n \n import java.io.IOException;\n import java.io.InputStream;\n+import java.nio.file.FileAlreadyExistsException;\n import java.nio.file.NoSuchFileException;\n import java.util.Map;\n \n@@ -68,8 +69,8 @@ public interface BlobContainer {\n * @param blobSize\n * The size of the blob to be written, in bytes. It is implementation dependent whether\n * this value is used in writing the blob to the repository.\n- * @throws IOException if the input stream could not be read, a blob by the same name already exists,\n- * or the target blob could not be written to.\n+ * @throws FileAlreadyExistsException if a blob by the same name already exists\n+ * @throws IOException if the input stream could not be read, or the target blob could not be written to.\n */\n void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException;\n ", "filename": "core/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java", "status": "modified" }, { "diff": "@@ -32,6 +32,7 @@\n import java.io.InputStream;\n import java.io.OutputStream;\n import java.nio.file.DirectoryStream;\n+import java.nio.file.FileAlreadyExistsException;\n import java.nio.file.Files;\n import java.nio.file.NoSuchFileException;\n import java.nio.file.Path;\n@@ -108,6 +109,9 @@ public InputStream readBlob(String name) throws IOException {\n \n @Override\n public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {\n+ if (blobExists(blobName)) {\n+ throw new FileAlreadyExistsException(\"blob [\" + blobName + \"] already exists, cannot overwrite\");\n+ }\n final Path file = path.resolve(blobName);\n try (OutputStream outputStream = Files.newOutputStream(file, StandardOpenOption.CREATE_NEW)) {\n Streams.copy(inputStream, outputStream, new byte[blobStore.bufferSizeInBytes()]);", "filename": "core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java", "status": "modified" }, { "diff": "@@ -34,6 +34,7 @@\n import java.io.OutputStream;\n import java.net.HttpURLConnection;\n import java.net.URISyntaxException;\n+import java.nio.file.FileAlreadyExistsException;\n import java.nio.file.NoSuchFileException;\n import java.util.Map;\n \n@@ -84,6 +85,9 @@ public InputStream readBlob(String blobName) throws IOException {\n \n @Override\n public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {\n+ if (blobExists(blobName)) {\n+ throw new FileAlreadyExistsException(\"blob [\" + blobName + \"] already exists, cannot overwrite\");\n+ }\n logger.trace(\"writeBlob({}, stream, {})\", blobName, blobSize);\n try (OutputStream stream = createOutput(blobName)) {\n Streams.copy(inputStream, stream);", "filename": "plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java", "status": "modified" }, { "diff": "@@ -26,6 +26,7 @@\n \n import java.io.IOException;\n import java.io.InputStream;\n+import java.nio.file.FileAlreadyExistsException;\n import java.util.Map;\n \n public class GoogleCloudStorageBlobContainer extends AbstractBlobContainer {\n@@ 
-65,6 +66,9 @@ public InputStream readBlob(String blobName) throws IOException {\n \n @Override\n public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {\n+ if (blobExists(blobName)) {\n+ throw new FileAlreadyExistsException(\"blob [\" + blobName + \"] already exists, cannot overwrite\");\n+ }\n blobStore.writeBlob(buildKey(blobName), inputStream, blobSize);\n }\n ", "filename": "plugins/repository-gcs/src/main/java/org/elasticsearch/common/blobstore/gcs/GoogleCloudStorageBlobContainer.java", "status": "modified" }, { "diff": "@@ -34,6 +34,7 @@\n \n import java.io.IOException;\n import java.io.InputStream;\n+import java.nio.file.FileAlreadyExistsException;\n import java.nio.file.NoSuchFileException;\n import java.util.Collections;\n import java.util.EnumSet;\n@@ -107,6 +108,9 @@ public InputStream run(FileContext fileContext) throws IOException {\n \n @Override\n public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {\n+ if (blobExists(blobName)) {\n+ throw new FileAlreadyExistsException(\"blob [\" + blobName + \"] already exists, cannot overwrite\");\n+ }\n store.execute(new Operation<Void>() {\n @Override\n public Void run(FileContext fileContext) throws IOException {", "filename": "plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java", "status": "modified" }, { "diff": "@@ -39,6 +39,7 @@\n import java.io.IOException;\n import java.io.InputStream;\n import java.io.OutputStream;\n+import java.nio.file.FileAlreadyExistsException;\n import java.nio.file.NoSuchFileException;\n import java.security.AccessController;\n import java.security.PrivilegedActionException;\n@@ -100,6 +101,9 @@ public InputStream readBlob(String blobName) throws IOException {\n \n @Override\n public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {\n+ if (blobExists(blobName)) {\n+ throw new FileAlreadyExistsException(\"blob [\" + blobName + \"] already exists, cannot overwrite\");\n+ }\n try (OutputStream stream = createOutput(blobName)) {\n Streams.copy(inputStream, stream);\n }", "filename": "plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/S3BlobContainer.java", "status": "modified" }, { "diff": "@@ -128,7 +128,6 @@ public void testDeleteBlob() throws IOException {\n }\n }\n \n- @AwaitsFix(bugUrl = \"https://github.com/elastic/elasticsearch/issues/15579\")\n public void testVerifyOverwriteFails() throws IOException {\n try (final BlobStore store = newBlobStore()) {\n final String blobName = \"foobar\";", "filename": "test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java", "status": "modified" } ] }
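The change in the record above closes the original complaint in two layers: every `writeBlob` implementation now checks `blobExists` and throws `FileAlreadyExistsException` up front, and the filesystem container additionally opens the file with `StandardOpenOption.CREATE_NEW` instead of the default options (`CREATE`, `TRUNCATE_EXISTING`, `WRITE`) that silently wipe an existing blob. The self-contained snippet below demonstrates that difference against a temporary file; the directory and blob names are made up for the demo.

```java
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

// Shows the difference the fix above relies on: default open options silently
// truncate an existing file, while CREATE_NEW refuses to overwrite it.
public class CreateNewVsTruncate {
    public static void main(String[] args) throws IOException {
        Path dir = Files.createTempDirectory("blobstore-sketch");
        Path blob = dir.resolve("my-blob"); // hypothetical blob name

        Files.write(blob, "original contents".getBytes(StandardCharsets.UTF_8));

        // Default options = CREATE, TRUNCATE_EXISTING, WRITE: the existing
        // 17-byte blob is silently truncated to the new 5-byte payload.
        try (OutputStream out = Files.newOutputStream(blob)) {
            out.write("oops!".getBytes(StandardCharsets.UTF_8));
        }
        System.out.println("after default open: " + Files.size(blob) + " bytes"); // prints 5

        // CREATE_NEW: the second write fails fast instead of clobbering the blob.
        try (OutputStream out = Files.newOutputStream(blob, StandardOpenOption.CREATE_NEW)) {
            out.write("never written".getBytes(StandardCharsets.UTF_8));
        } catch (FileAlreadyExistsException e) {
            System.out.println("refused to overwrite: " + e.getFile());
        }
    }
}
```

One design note: the new `blobExists` check is a check-then-act step and is not atomic with the write, so for the filesystem store the `CREATE_NEW` open option remains the part that actually guarantees an existing blob is never truncated.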
{ "body": "**Elasticsearch version**: 2.3.4\n**JVM version**: 1.8.0_91\n**OS version**: RedHat 6.5\n\nWe are using the TribeNode feature to enable search across a number of geographically distributed ElasticSearch clusters. Occasionally when we take one of these clusters completely offline, we find that our TribeNode hits the following exception:\n\n```\njava.lang.OutOfMemoryError: unable to create new native thread\n at java.lang.Thread.start0(Native Method)\n at java.lang.Thread.start(Thread.java:714)\n at java.util.concurrent.ThreadPoolExecutor.addWorker(ThreadPoolExecutor.java:950)\n at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1368)\n at org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor.execute(EsThreadPoolExecutor.java:85)\n at org.elasticsearch.threadpool.ThreadPool$ThreadedRunnable.run(ThreadPool.java:676)\n at org.elasticsearch.threadpool.ThreadPool$LoggingRunnable.run(ThreadPool.java:640)\n at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)\n at java.util.concurrent.FutureTask.run(FutureTask.java:266)\n at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$201(ScheduledThreadPoolExecutor.java:180)\n at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:293)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\n```\n\nThis exception is thrown because of thread exhaustion due to the TribeNode creating a new thread every couple of seconds. Below is the stack trace of the leaked threads:\n\n```\njava.util.concurrent.locks.LockSupport.park(LockSupport.java:186)\n java.util.concurrent.locks.AbstractQueuedSynchronizer.parkAndCheckInterrupt(AbstractQueuedSynchronizer.java:834)\n java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireQueued(AbstractQueuedSynchronizer.java:867)\n java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:1197)\n java.util.concurrent.locks.ReentrantLock$NonfairSync.lock(ReentrantLock.java:214)\n java.util.concurrent.locks.ReentrantLock.lock(ReentrantLock.java:290)\n org.elasticsearch.common.util.concurrent.KeyedLock.acquire(KeyedLock.java:75)\n org.elasticsearch.transport.netty.NettyTransport.disconnectFromNode(NettyTransport.java:1063)\n org.elasticsearch.transport.TransportService.disconnectFromNode(TransportService.java:274)\n org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing$2$1.doRun(UnicastZenPing.java:258)\n org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\n java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n java.lang.Thread.run(Thread.java:745)\n```\n\n**Steps to reproduce**:\n Create TribeNode configuration where one cluster is offline. Its not enough that the processes are shutdown and the machine is online, the nodes specified in the discovery.zen.ping.unicast.hosts for the offline cluster must be offline and not respond to ping/connection attempts. 
Here is a simple configuration I was able to use to reproduce the problem.\n\n```\n\n---\ncluster.name: \"thread-leak-test\"\nnode.name: \"thread-leak-node\"\nhttp.port: \"9201\"\nhttp.host: \"127.0.0.1\"\ntribe:\n online-cluster:\n cluster.name: \"online-cluster\"\n discovery.zen.ping.unicast.hosts:\n - \"localhost\"\n offline-cluster:\n cluster.name: \"offline-cluster\"\n discovery.zen.ping.unicast.hosts:\n - \"10.10.10.10\"\n```\n\nStart the Tribe node. Observe that the number of threads continue to grow unbounded (`ps -m <pid> | wc -l`) until the OutOfMemoryError: unable to create new native thread exceptions are thrown.\n\nThis issue appears similar to the problem described in #8057.\n", "comments": [ { "body": "@escheie Thanks for reporting! I reproduced it and will come back with a potential fix.\n", "created_at": "2016-08-01T11:31:50Z" }, { "body": "Thanks tlrx for looking into the issue. I've found that setting `discovery.zen.ping.unicast.concurrent_connects` to 1 (default value is 10) limits the number of threads that pile up as the lock gets released to the threads waiting on disconnect every 30s between connection timeouts. When it was 10, the connect threads could theoretically hold the lock forever, preventing the disconnect threads from ever getting a chance to complete.\n", "created_at": "2016-08-05T00:23:25Z" }, { "body": "@escheie True. But I think the comment made is the #19719 makes sense and changing the connection timeout for pings will help. But I still need to think about it again.\n", "created_at": "2016-08-05T07:14:44Z" }, { "body": "With more testing overnight, I found that setting the \"discovery.zen.ping.unicast.concurrent_connects\" to 1 only works if TRACE logging is enabled for discovery. Seems that little extra time the connect thread spends logging gives the other threads performing the disconnect a chance to get the lock. Would a shorter connect timeout help if it is still more than the interval that connects are attempted which appear to be every 1.5 seconds? \n\nLooks like the KeyedLock used by NettyTransport in 2.x and TcpTransport in 5.x supports a fair option so that threads are able to acquire the lock in the order they request it. This fair option is currently set to false. If threads are able to obtain the lock in they order they request it, then that should ensure the disconnect threads get a chance to run between connection attempts. I suppose enabling the fair option though would result in a significant performance penalty, so probably not an option.\n", "created_at": "2016-08-05T17:49:02Z" }, { "body": "I've confirmed that enabling the \"fair\" flag in the KeyedLock does prevent the number of threads from growing unbounded. The maximum number of threads that pile up equals (discovery.zen.ping.unicast.concurrent_connects \\* connect_timeout)/(disconnect frequency) = 10*30/3 = 100. This number can be reduced by lowering discovery.zen.ping.unicast.concurrent_connects in the configuration or if the connect_timeout is also lowered as proposed. \n\nSince it looks like the KeyedLock is only used during connect and disconnect and not for connection lookup, enabling the fair flag may not impact performance as I previously feared.\n", "created_at": "2016-08-05T19:18:24Z" }, { "body": "Thanks @escheie ! 
Your effort and investigation are great.\n\nI do feel like the issue happens because we try to disconnect from nodes even if we never succeed to connect to them, and `UnicastZenPing` blindly piles up thread for disconnecting them (and these threads try to acquire a lock and slow dows ping threads too).\n\nI proposed a new fix #19719, I'm wondering if it works for you too.\n", "created_at": "2016-08-24T10:34:27Z" } ], "number": 19370, "title": "Thread leak in TribeNode when a cluster is offline" }
{ "body": "The Unicast Zen Ping service pings all known nodes every 3 seconds using a light connecting method. For nodes defined in the configuration as unicast hosts and not yet \"found by address\" (meaning that a successful connection has never been established to them) the node is added to a list of nodes to disconnect once the ping is terminated whatever the result of the ping. The round of pings is executed until a master is elected, but if no master can be elected (because of min master nodes or in case of a tribe client node with an offline remote cluster) the pings are executed over and over.\n\nThe thing is that nodes are pinged every `3s` but the connection timeout is configured by default to `30s`. This leads to a situation where many threads are created and added to the generic thread pool in order to disconnect from the node but the disconnect method `TcpTransport.disconnectFromNode(DiscoveryNode node)` blindly tries to acquire a lock on the node even if it will be impossible to disconnect from it (because node is not reachable).  So disconnecting threads are stacked at the rate of 1 every 3sec until the generic thread pool is full.\n\nAdding a check in the `TcpTransport.disconnectFromNode(DiscoveryNode node)` similar to the check done in`disconnectFromNode(DiscoveryNode node, Channel channel, String reason)` avoids threads to block for nothing.\n\nWe could also use a connection timeout of 3s when pinging nodes as it would help to fail connection faster and it would keep the number of blocking threads lower but would not resolve the main issue of threads blocking for nothing.\n\nThis settings can be used to reproduce the issue (check number of threads of generic thread pool):\n\n```\ntribe.t1.cluster.name: \"offline\"\ntribe.t1.discovery.zen.ping.unicast.hosts:\n- '10.10.10.10'\n```\n\nor\n\n```\ndiscovery.zen.minimum_master_nodes: 2\ndiscovery.zen.ping.unicast.hosts:\n- '10.10.10.10'\n```\n\ncloses #19370\n\nI think we have the same issue in 2.x. in `NettyTransport`\n", "number": 19719, "review_comments": [ { "body": "we can not check out of lock. The problem is that we need to make sure that we have a stricit linearization of connection operation. If you call disconnect and it succeed you know you are disconnected and no ongoing work from before will end up reconnecting you (not how we only add the connected node to the map after a successful connection). I think the right solution here is to add timeouts to the connections done from the pings? maybe an easy way is to have a different connection timeout for \"light\" connections then we do for normal ones. \n", "created_at": "2016-08-03T10:07:00Z" } ], "title": "Avoid zen pinging threads to pile up" }
{ "commits": [ { "message": "Avoid zen pinging threads to pile up\n\nThe Unicast Zen Ping service pings all known nodes every 3 seconds using a light connecting method. For nodes defined in the configuration as unicast hosts and not yet \"found by address\" (meaning that a successful connection has never been established to them) the node is added to a list of nodes to disconnect once the ping is terminated whatever the result of the ping. The round of pings is executed until a master is elected, but if no master can be elected (because of min master nodes or in case of a tribe client node with an offline remote cluster) the pings are executed over and over.\n\nThe thing is that nodes are pinged every 3s but the connection timeout is configured by default to 30s. This leads to a situation where many threads are created and added to the generic thread pool in order to disconnect from the node but the disconnect method TcpTransport.disconnectFromNode(DiscoveryNode node) blindly tries to acquire a lock on the node even if it will be impossible to disconnect from it (because node is not reachable). So disconnecting threads are stacked at the rate of 1 every 3sec until the generic thread pool is full.\n\nAdding a check in the TcpTransport.disconnectFromNode(DiscoveryNode node) similar to the check done in disconnectFromNode(DiscoveryNode node, Channel channel, String reason) helps to avoid threads to block for nothing.\n\nWe could also use a connection timeout of 3s when pinging nodes. This would help to fail connection faster and it would keep the number of blocking threads lower but would not resolve the main issue of threads blocking for nothing.\n\nThis settings can be used to reproduce the issue (check number of threads of generic thread pool):\n\ntribe.t1.cluster.name: \"offline\"\ntribe.t1.discovery.zen.ping.unicast.hosts:\n- '10.10.10.10'\n\nor\n\ndiscovery.zen.minimum_master_nodes: 2\ndiscovery.zen.ping.unicast.hosts:\n- '10.10.10.10'" }, { "message": "Revert changes in TcpTransport.java" }, { "message": "UnicastZenPing: Do not try to disconnect from unconnected nodes" } ], "files": [ { "diff": "@@ -46,7 +46,6 @@\n import org.elasticsearch.discovery.zen.ping.PingContextProvider;\n import org.elasticsearch.discovery.zen.ping.ZenPing;\n import org.elasticsearch.threadpool.ThreadPool;\n-import org.elasticsearch.transport.TransportResponseHandler;\n import org.elasticsearch.transport.ConnectTransportException;\n import org.elasticsearch.transport.RemoteTransportException;\n import org.elasticsearch.transport.TransportChannel;\n@@ -55,6 +54,7 @@\n import org.elasticsearch.transport.TransportRequestHandler;\n import org.elasticsearch.transport.TransportRequestOptions;\n import org.elasticsearch.transport.TransportResponse;\n+import org.elasticsearch.transport.TransportResponseHandler;\n import org.elasticsearch.transport.TransportService;\n \n import java.io.Closeable;\n@@ -217,7 +217,8 @@ public void clearTemporalResponses() {\n temporalResponses.clear();\n }\n \n- public PingResponse[] pingAndWait(TimeValue duration) {\n+ /** For testing purpose **/\n+ PingResponse[] pingAndWait(TimeValue duration) {\n final AtomicReference<PingResponse[]> response = new AtomicReference<>();\n final CountDownLatch latch = new CountDownLatch(1);\n ping(pings -> {\n@@ -316,7 +317,6 @@ public void close() {\n }\n }\n \n-\n void sendPings(final TimeValue timeout, @Nullable TimeValue waitTime, final SendPingsHandler sendPingsHandler) {\n final UnicastPingRequest pingRequest = new UnicastPingRequest();\n 
pingRequest.id = sendPingsHandler.id();\n@@ -380,7 +380,6 @@ void sendPings(final TimeValue timeout, @Nullable TimeValue waitTime, final Send\n logger.trace(\"replacing {} with temp node {}\", nodeToSend, tempNode);\n nodeToSend = tempNode;\n }\n- sendPingsHandler.nodeToDisconnect.add(nodeToSend);\n }\n // fork the connection to another thread\n final DiscoveryNode finalNodeToSend = nodeToSend;\n@@ -396,6 +395,9 @@ public void run() {\n if (!nodeFoundByAddress) {\n logger.trace(\"[{}] connecting (light) to {}\", sendPingsHandler.id(), finalNodeToSend);\n transportService.connectToNodeLightAndHandshake(finalNodeToSend, timeout.getMillis());\n+\n+ logger.trace(\"[{}] add node {} to the list of nodes to disconnect\", sendPingsHandler.id(), finalNodeToSend);\n+ sendPingsHandler.nodeToDisconnect.add(finalNodeToSend);\n } else {\n logger.trace(\"[{}] connecting to {}\", sendPingsHandler.id(), finalNodeToSend);\n transportService.connectToNode(finalNodeToSend);", "filename": "core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java", "status": "modified" }, { "diff": "@@ -93,6 +93,7 @@\n import java.util.concurrent.atomic.AtomicReference;\n import java.util.concurrent.locks.ReadWriteLock;\n import java.util.concurrent.locks.ReentrantReadWriteLock;\n+import java.util.function.Supplier;\n import java.util.regex.Matcher;\n import java.util.regex.Pattern;\n ", "filename": "core/src/main/java/org/elasticsearch/transport/TcpTransport.java", "status": "modified" }, { "diff": "@@ -0,0 +1,211 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.discovery.zen.ping.unicast;\n+\n+import org.elasticsearch.Version;\n+import org.elasticsearch.cluster.node.DiscoveryNode;\n+import org.elasticsearch.cluster.node.DiscoveryNodes;\n+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;\n+import org.elasticsearch.common.network.NetworkService;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.transport.TransportAddress;\n+import org.elasticsearch.common.unit.TimeValue;\n+import org.elasticsearch.common.util.BigArrays;\n+import org.elasticsearch.discovery.zen.elect.ElectMasterService;\n+import org.elasticsearch.discovery.zen.ping.PingContextProvider;\n+import org.elasticsearch.discovery.zen.ping.ZenPing;\n+import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;\n+import org.elasticsearch.test.ESTestCase;\n+import org.elasticsearch.test.transport.MockTransportService;\n+import org.elasticsearch.threadpool.TestThreadPool;\n+import org.elasticsearch.threadpool.ThreadPool;\n+import org.elasticsearch.transport.MockTcpTransport;\n+import org.elasticsearch.transport.Transport;\n+import org.elasticsearch.transport.TransportService;\n+import org.junit.After;\n+import org.junit.AfterClass;\n+import org.junit.Before;\n+import org.junit.BeforeClass;\n+\n+import java.util.List;\n+import java.util.concurrent.CopyOnWriteArrayList;\n+\n+import static java.util.Collections.emptyList;\n+import static java.util.Collections.emptySet;\n+import static org.hamcrest.Matchers.allOf;\n+import static org.hamcrest.Matchers.arrayWithSize;\n+import static org.hamcrest.Matchers.hasItem;\n+import static org.hamcrest.Matchers.notNullValue;\n+\n+public class UnicastZenPingTests extends ESTestCase {\n+\n+ private static ThreadPool THREAD_POOL;\n+\n+ private TrackingMockTransportService transportA;\n+ private TrackingMockTransportService transportB;\n+ private DiscoveryNode nodeA;\n+ private DiscoveryNode nodeB;\n+\n+ @BeforeClass\n+ public static void createThreadPool() {\n+ THREAD_POOL = new TestThreadPool(UnicastZenPingTests.class.getName());\n+ }\n+\n+ @AfterClass\n+ public static void destroyThreadPool() throws InterruptedException {\n+ terminate(THREAD_POOL);\n+ THREAD_POOL = null;\n+ }\n+\n+ @Before\n+ public void startTransports() {\n+ transportA = createTransportService();\n+ nodeA = new DiscoveryNode(\"node_a\", transportA.boundAddress().publishAddress(), Version.CURRENT);\n+ transportA.setLocalNode(nodeA);\n+\n+ transportB = createTransportService();\n+ nodeB = new DiscoveryNode(\"node_b\", transportB.boundAddress().publishAddress(), Version.CURRENT);\n+ transportB.setLocalNode(nodeB);\n+ }\n+\n+ @After\n+ public void stopTransports() {\n+ transportA.stop();\n+ transportB.stop();\n+ }\n+\n+ /**\n+ * Test the Unicast Zen Ping responses when a node is unreachable at first and then becomes\n+ * reachable. 
It also ensure that no disconnections are issued to an unreachable node.\n+ */\n+ public void testPingUnreachableThenReachableNode() throws Exception {\n+ try (\n+ UnicastZenPing zenPingB = createUnicastZenPing(transportB, nodeA.getAddress().toString());\n+ UnicastZenPing zenPingA = createUnicastZenPing(transportA, nodeB.getAddress().toString());\n+ ) {\n+ PingContextProvider contextA = createPingContextProvider(nodeA, null);\n+ assertNull(\"Node A must not resolve Node B by address\", contextA.nodes().findByAddress(nodeB.getAddress()));\n+ zenPingA.setPingContextProvider(contextA);\n+ zenPingA.start();\n+\n+ PingContextProvider contextB = createPingContextProvider(nodeB, nodeA);\n+ assertNotNull(\"Node B must resolve Node A by address\", contextB.nodes().findByAddress(nodeA.getAddress()));\n+ zenPingB.setPingContextProvider(contextB);\n+ zenPingB.start();\n+\n+ logger.trace(\"Node A can't reach Node B\");\n+ transportA.addFailToSendNoConnectRule(nodeB.getAddress());\n+\n+ logger.trace(\"Node A pings Node B, no response is expected\");\n+ ZenPing.PingResponse[] pings = zenPingA.pingAndWait(TimeValue.timeValueSeconds(1L));\n+ assertThat(pings, allOf(notNullValue(), arrayWithSize(0)));\n+\n+ logger.trace(\"Node A has no connection to Node B and did not initiate a disconnection\");\n+ assertFalse(transportA.nodeConnected(nodeB));\n+ assertTrue(transportA.getDisconnects().isEmpty());\n+\n+ logger.trace(\"Node A can now reach Node B\");\n+ transportA.clearAllRules();\n+\n+ logger.trace(\"Node A pings Node B, one successful ping response is expected\");\n+ pings = zenPingA.pingAndWait(TimeValue.timeValueSeconds(1L));\n+ assertThat(pings, arrayWithSize(1));\n+\n+ logger.trace(\"Node B pings Node A, one successful ping response is expected\");\n+ pings = zenPingB.pingAndWait(TimeValue.timeValueSeconds(1L));\n+ assertThat(pings, arrayWithSize(1));\n+\n+ logger.trace(\"Node B kept the connection to Node A because it has been resolved by address\");\n+ assertBusy(() -> {\n+ assertTrue(transportB.nodeConnected(nodeA));\n+ assertTrue(transportB.getDisconnects().isEmpty());\n+ });\n+\n+ logger.trace(\"Node A closed the connection to Node B\");\n+ assertBusy(() -> {\n+ assertFalse(transportA.nodeConnected(nodeB));\n+ assertThat(transportA.getDisconnects(), hasItem(nodeB.getAddress()));\n+ });\n+ }\n+ }\n+\n+ private TrackingMockTransportService createTransportService() {\n+ MockTcpTransport transport =\n+ new MockTcpTransport(\n+ Settings.EMPTY,\n+ THREAD_POOL,\n+ BigArrays.NON_RECYCLING_INSTANCE,\n+ new NoneCircuitBreakerService(),\n+ new NamedWriteableRegistry(emptyList()),\n+ new NetworkService(Settings.EMPTY, emptyList()));\n+\n+ TrackingMockTransportService transportService = new TrackingMockTransportService(Settings.EMPTY, transport, THREAD_POOL);\n+ transportService.start();\n+ transportService.acceptIncomingRequests();\n+ return transportService;\n+ }\n+\n+ private UnicastZenPing createUnicastZenPing(TransportService transportService, String... 
unicastHosts) {\n+ Settings settings = Settings.builder()\n+ .putArray(UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(), unicastHosts)\n+ .build();\n+ return new UnicastZenPing(settings, THREAD_POOL, transportService, new ElectMasterService(settings), emptySet());\n+ }\n+\n+ private PingContextProvider createPingContextProvider(DiscoveryNode local, DiscoveryNode other) {\n+ return new PingContextProvider() {\n+ @Override\n+ public boolean nodeHasJoinedClusterOnce() {\n+ return false;\n+ }\n+\n+ @Override\n+ public DiscoveryNodes nodes() {\n+ DiscoveryNodes.Builder builder = DiscoveryNodes.builder().localNodeId(local.getId()).add(local);\n+ if (other != null) {\n+ builder.add(other);\n+ }\n+ return builder.build();\n+ }\n+ };\n+ }\n+\n+ /**\n+ * A MockTransportService that tracks the number of disconnect attempts\n+ **/\n+ static class TrackingMockTransportService extends MockTransportService {\n+\n+ private final List<TransportAddress> disconnects = new CopyOnWriteArrayList<>();\n+\n+ TrackingMockTransportService(Settings settings, Transport transport, ThreadPool threadPool) {\n+ super(settings, transport, threadPool);\n+ }\n+\n+ @Override\n+ public void disconnectFromNode(DiscoveryNode node) {\n+ disconnects.add(node.getAddress());\n+ super.disconnectFromNode(node);\n+ }\n+\n+ List<TransportAddress> getDisconnects() {\n+ return disconnects;\n+ }\n+ }\n+}", "filename": "core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java", "status": "added" } ] }
{ "body": "```\n 2> REPRODUCE WITH: gradle :core:integTest -Dtests.seed=A4648847991E5C27 -Dtests.class=org.elasticsearch.search.aggregations.EquivalenceIT -Dtests.method=\"testRandomRanges\" -Dtests.security.manager=true -Dtests.jvms=12 -Dtests.locale=nn-NO -Dtests.timezone=Europe/Helsinki\nFAILURE 5.55s J10 | EquivalenceIT.testRandomRanges <<< FAILURES!\n > Throwable #1: java.lang.AssertionError: 4 expected:<889> but was:<890>\n > at __randomizedtesting.SeedInfo.seed([A4648847991E5C27:D00DCF66E03EFC10]:0)\n > at org.elasticsearch.search.aggregations.EquivalenceIT.testRandomRanges(EquivalenceIT.java:186)\n > at java.lang.Thread.run(Thread.java:745)\n```\n", "comments": [ { "body": "@nik9000 I have done some investigations about this test failed, this is caused when use seed: **A4648847991E5C27**, it will generate one number: **83.99999909626781**, but for **idx type** without set values mapping, it choose the **float type** mapping for it, this will caused rounding. so it fail. \n", "created_at": "2016-07-30T12:51:55Z" }, { "body": "I have created a pull request for fixing this: https://github.com/elastic/elasticsearch/pull/19701 \n:) \n", "created_at": "2016-07-30T12:53:19Z" } ], "number": 19697, "title": "EquivalenceIT fails reproducibly with a seed" }
{ "body": "Closes #19697 \n", "number": 19701, "review_comments": [], "title": "fix testRandomRanges failed with -Dtest.seed A4648847991E5C27" }
{ "commits": [ { "message": "fix testRandomRanges failed with -Dtest.seed A4648847991E5C27" }, { "message": "set double value to double type mapping in EquivalenceIT" } ], "files": [ { "diff": "@@ -103,7 +103,18 @@ public void testRandomRanges() throws Exception {\n }\n }\n \n- createIndex(\"idx\");\n+ prepareCreate(\"idx\")\n+ .addMapping(\"type\", jsonBuilder()\n+ .startObject()\n+ .startObject(\"type\")\n+ .startObject(\"properties\")\n+ .startObject(\"values\")\n+ .field(\"type\", \"double\")\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .endObject()).execute().actionGet();\n+\n for (int i = 0; i < docs.length; ++i) {\n XContentBuilder source = jsonBuilder()\n .startObject()\n@@ -202,6 +213,9 @@ public void testDuelTerms() throws Exception {\n .startObject()\n .startObject(\"type\")\n .startObject(\"properties\")\n+ .startObject(\"num\")\n+ .field(\"type\", \"double\")\n+ .endObject()\n .startObject(\"string_values\")\n .field(\"type\", \"keyword\")\n .startObject(\"fields\")\n@@ -323,7 +337,18 @@ public void testDuelTerms() throws Exception {\n \n // Duel between histograms and scripted terms\n public void testDuelTermsHistogram() throws Exception {\n- createIndex(\"idx\");\n+ prepareCreate(\"idx\")\n+ .addMapping(\"type\", jsonBuilder()\n+ .startObject()\n+ .startObject(\"type\")\n+ .startObject(\"properties\")\n+ .startObject(\"num\")\n+ .field(\"type\", \"double\")\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .endObject()).execute().actionGet();\n+\n \n final int numDocs = scaledRandomIntBetween(500, 5000);\n final int maxNumTerms = randomIntBetween(10, 2000);\n@@ -383,7 +408,17 @@ public void testDuelTermsHistogram() throws Exception {\n \n public void testLargeNumbersOfPercentileBuckets() throws Exception {\n // test high numbers of percentile buckets to make sure paging and release work correctly\n- createIndex(\"idx\");\n+ prepareCreate(\"idx\")\n+ .addMapping(\"type\", jsonBuilder()\n+ .startObject()\n+ .startObject(\"type\")\n+ .startObject(\"properties\")\n+ .startObject(\"double_value\")\n+ .field(\"type\", \"double\")\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .endObject()).execute().actionGet();\n \n final int numDocs = scaledRandomIntBetween(2500, 5000);\n logger.info(\"Indexing [{}] docs\", numDocs);", "filename": "core/src/test/java/org/elasticsearch/search/aggregations/EquivalenceIT.java", "status": "modified" } ] }
{ "body": "We currently have concurrency issue between the static methods on the Store class and store changes that are done via a valid open store. An example of this is the async shard fetch which can reach out to a node while a local shard copy is shutting down (the fetch does check if we have an open shard and tries to use that first, but if the shard is shutting down, it will not be available from IndexService).\n\nSpecifically, async shard fetching tries to read metadata from store, concurrently the shard that shuts down commits to lucene, changing the segments_N file. this causes a file not find exception on the shard fetching side. That one in turns makes the master think the shard is unusable. In tests this can cause the shard assignment to be delayed (up to 1m) which fails tests. See https://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+master+java9-periodic/570 for details.\n\nThis is one of the things #18938 caused to bubble up.\n", "comments": [ { "body": "left on nit - LGTM otherwise\n", "created_at": "2016-07-18T08:27:01Z" }, { "body": "LGTM\n", "created_at": "2016-07-18T08:30:14Z" } ], "number": 19416, "title": "Make static Store access shard lock aware" }
{ "body": "In several places in our code we need to get a consistent list of files + metadata of the current index. We currently have a couple of ways to do in the `Store` class, which also does the right things and tries to verify the integrity of the smaller files. Sadly, those methods can run into trouble if anyone writes into the folder while they are busy. Most notably, the index shard's engine decides to commit half way and remove a `segment_N` file before the store got to checksum (but did already list it). This race condition typically doesn't happen as almost all of the places where we list files also happen to be places where the relevant shard doesn't yet have an engine. There is however an exception (of course :)) which is the API to list shard stores, used by the master when it is looking for shard copies to assign to.\n\nI already took one shot at fixing this in #19416 , but it turns out not to be enough - see for example https://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+master+multijob-os-compatibility/os=sles/822.\n\nThe first inclination to fix this was to add more locking to the different Store methods and acquire the `IndexWriter` lock, thus preventing any engine for accessing if if the a shard is offline and use the current index commit snapshotting logic already existing in `IndexShard` for when the engine is started. That turned out to be a bad idea as we create more subtleties where, for example, a store listing can prevent a shard from starting up (the writer lock doesn't wait if it can't get access, but fails immediately, which is good). Another example is running on a shared directory where some other engine may actually hold the lock.\n\nInstead I decided to take another approach:\n1) Remove all the various methods on store and keep one, which accepts an index commit (which can be null) and also clearly communicates that the _caller_ is responsible for concurrent access. This also tightens up the API which is a plus.\n2) Add a `snapshotStore` method to IndexShard that takes care of all the concurrency aspects with the engine, which is now possible because it's all in the same place. It's still a bit ugly but at least it's all in one place and we can evaluate how to improve on this later on. I also renamed the `snapshotIndex` method to `acquireIndexCommit` to avoid confusion and I think it communicates better what it does.\n", "number": 19684, "review_comments": [ { "body": "Hmm but what happens if the engine is closed just before, or while, we call `deletionPolicy.snapshot()` below?\n", "created_at": "2016-07-29T13:31:03Z" }, { "body": "Does the Lucene `write.lock` acquisition here have any risk of preventing the shard from starting up (in another thread)? Or do we ensure that no other instance of `IndexShard` would ever be created (than this one that already exists) for this index directory?\n", "created_at": "2016-07-29T13:41:42Z" }, { "body": "I think the deletion policy is safe to use then, but I agree this is all very icky, but at least it's in one place now so we can improve.\n", "created_at": "2016-07-29T13:44:35Z" }, { "body": "The mutex ensures that the engine will not be created while we're busy here. 
As far as other `IndexShard`s go - the `incRef` on the store here makes sure the store is not cleaned, so we still have the shard lock, so no other shard can start (but it will wait for a few seconds for us to complete).\n", "created_at": "2016-07-29T13:46:29Z" }, { "body": "OK indeed I looked at `SnapshotDeletionPolicy` and it looks OK if you pull a snapshot, and then IW closes (you get the last commit before IW closed, and any files it references will remain in existence until the next IW is created), or if IW closes and you pull a snapshot (you get whatever IW committed on close). And its methods are sync'd.\n", "created_at": "2016-07-29T13:56:31Z" }, { "body": "since we incRef this store in both cases can we do it outside of the mutex instead?\n", "created_at": "2016-08-02T10:21:19Z" }, { "body": "I wonder if this really works OK in a ShadowIndexShard case. I wonder if we should just throw UOE there, otherwise we might need to do some `Lucene.waitForIndex(store.directory(), nonexistentRetryTime)` as we do in the ShadowEngine\n", "created_at": "2016-08-02T10:39:02Z" }, { "body": "can we remove this?\n", "created_at": "2016-08-02T10:41:26Z" }, { "body": "maybe just have one return statement at the end?\n", "created_at": "2016-08-02T10:41:49Z" }, { "body": "+1\n", "created_at": "2016-08-02T20:49:09Z" }, { "body": "removed\n", "created_at": "2016-08-02T20:49:12Z" }, { "body": "I rewrote it\n", "created_at": "2016-08-02T20:49:29Z" }, { "body": "I was thinking about it too but decided not to tackle it in the end. Thinking about it again, a UOE for now is the simplest - we really don't know what the primary will do\n", "created_at": "2016-08-02T21:07:25Z" } ], "title": "Tighten up concurrent store metadata listing and engine writes" }
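The `acquireIndexCommit`/`releaseIndexCommit` pattern discussed in this PR sits on top of Lucene's `SnapshotDeletionPolicy`. The sketch below shows only the underlying Lucene mechanics, assuming Lucene 6.x-era APIs with lucene-core and lucene-analyzers-common on the classpath; it is not the `IndexShard` code itself: an acquired commit and the files it references survive later commits until it is released.

```java
import java.nio.file.Files;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class AcquireCommitDemo {
    public static void main(String[] args) throws Exception {
        SnapshotDeletionPolicy policy = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
        try (Directory dir = FSDirectory.open(Files.createTempDirectory("commits"));
             IndexWriter writer = new IndexWriter(dir,
                     new IndexWriterConfig(new StandardAnalyzer()).setIndexDeletionPolicy(policy))) {

            writer.addDocument(new Document());
            writer.commit();

            // "acquire": the snapshotted commit and every file it references are
            // protected from deletion, even if the writer keeps committing.
            IndexCommit commit = policy.snapshot();
            try {
                writer.addDocument(new Document());
                writer.commit(); // would normally delete the previous segments_N

                // It is still safe to list and read the files of the acquired commit.
                try (DirectoryReader reader = DirectoryReader.open(commit)) {
                    System.out.println("docs in acquired commit: " + reader.numDocs()); // 1
                }
            } finally {
                // "release": lets the deletion policy clean up the old files on the
                // next commit or on close.
                policy.release(commit);
            }
        }
    }
}
```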
{ "commits": [ { "message": "add locking to store access" }, { "message": "fix some tests" }, { "message": "more logging" }, { "message": "add a direct access to the store, so engine maybe open or close" }, { "message": "tests ands some fixing" }, { "message": "fix shardow recovery" }, { "message": "move recovery target service to new indexShard.snapshotStore" }, { "message": "sigh" }, { "message": "sigh2" }, { "message": "reduce unsafe methods in Store" }, { "message": "fix npe" }, { "message": "Merge remote-tracking branch 'upstream/master' into store_metadata_access" }, { "message": "to @mikemccand with love" }, { "message": "feedback" }, { "message": "merge from master" }, { "message": "rewrite the right thing" }, { "message": "add UOE to ShadowIndexshards" } ], "files": [ { "diff": "@@ -654,7 +654,7 @@ public void forceMerge(boolean flush) throws IOException {\n *\n * @param flushFirst indicates whether the engine should flush before returning the snapshot\n */\n- public abstract IndexCommit snapshotIndex(boolean flushFirst) throws EngineException;\n+ public abstract IndexCommit acquireIndexCommit(boolean flushFirst) throws EngineException;\n \n /**\n * fail engine due to some error. the engine will also be closed.", "filename": "core/src/main/java/org/elasticsearch/index/engine/Engine.java", "status": "modified" }, { "diff": "@@ -852,7 +852,7 @@ public void forceMerge(final boolean flush, int maxNumSegments, boolean onlyExpu\n }\n \n @Override\n- public IndexCommit snapshotIndex(final boolean flushFirst) throws EngineException {\n+ public IndexCommit acquireIndexCommit(final boolean flushFirst) throws EngineException {\n // we have to flush outside of the readlock otherwise we might have a problem upgrading\n // the to a write lock when we fail the engine in this operation\n if (flushFirst) {", "filename": "core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java", "status": "modified" }, { "diff": "@@ -205,7 +205,7 @@ public void refresh(String source) throws EngineException {\n }\n \n @Override\n- public IndexCommit snapshotIndex(boolean flushFirst) throws EngineException {\n+ public IndexCommit acquireIndexCommit(boolean flushFirst) throws EngineException {\n throw new UnsupportedOperationException(\"Can not take snapshot from a shadow engine\");\n }\n ", "filename": "core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java", "status": "modified" }, { "diff": "@@ -21,14 +21,19 @@\n \n import org.apache.lucene.codecs.PostingsFormat;\n import org.apache.lucene.index.CheckIndex;\n+import org.apache.lucene.index.CorruptIndexException;\n import org.apache.lucene.index.IndexCommit;\n+import org.apache.lucene.index.IndexFormatTooNewException;\n+import org.apache.lucene.index.IndexFormatTooOldException;\n+import org.apache.lucene.index.IndexWriter;\n import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;\n import org.apache.lucene.index.SnapshotDeletionPolicy;\n import org.apache.lucene.index.Term;\n import org.apache.lucene.search.Query;\n import org.apache.lucene.search.QueryCachingPolicy;\n import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;\n import org.apache.lucene.store.AlreadyClosedException;\n+import org.apache.lucene.store.Lock;\n import org.apache.lucene.util.IOUtils;\n import org.apache.lucene.util.ThreadInterruptedException;\n import org.elasticsearch.ElasticsearchException;\n@@ -116,10 +121,12 @@\n import org.elasticsearch.search.suggest.completion2x.Completion090PostingsFormat;\n import 
org.elasticsearch.threadpool.ThreadPool;\n \n+import java.io.FileNotFoundException;\n import java.io.IOException;\n import java.io.PrintStream;\n import java.nio.channels.ClosedByInterruptException;\n import java.nio.charset.StandardCharsets;\n+import java.nio.file.NoSuchFileException;\n import java.util.ArrayList;\n import java.util.EnumSet;\n import java.util.List;\n@@ -789,29 +796,66 @@ public org.apache.lucene.util.Version minimumCompatibleVersion() {\n \n /**\n * Creates a new {@link IndexCommit} snapshot form the currently running engine. All resources referenced by this\n- * commit won't be freed until the commit / snapshot is released via {@link #releaseSnapshot(IndexCommit)}.\n+ * commit won't be freed until the commit / snapshot is released via {@link #releaseIndexCommit(IndexCommit)}.\n *\n * @param flushFirst <code>true</code> if the index should first be flushed to disk / a low level lucene commit should be executed\n */\n- public IndexCommit snapshotIndex(boolean flushFirst) throws EngineException {\n+ public IndexCommit acquireIndexCommit(boolean flushFirst) throws EngineException {\n IndexShardState state = this.state; // one time volatile read\n // we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine\n if (state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) {\n- return getEngine().snapshotIndex(flushFirst);\n+ return getEngine().acquireIndexCommit(flushFirst);\n } else {\n throw new IllegalIndexShardStateException(shardId, state, \"snapshot is not allowed\");\n }\n }\n \n \n /**\n- * Releases a snapshot taken from {@link #snapshotIndex(boolean)} this must be called to release the resources\n+ * Releases a snapshot taken from {@link #acquireIndexCommit(boolean)} this must be called to release the resources\n * referenced by the given snapshot {@link IndexCommit}.\n */\n- public void releaseSnapshot(IndexCommit snapshot) throws IOException {\n+ public void releaseIndexCommit(IndexCommit snapshot) throws IOException {\n deletionPolicy.release(snapshot);\n }\n \n+ /**\n+ * gets a {@link Store.MetadataSnapshot} for the current directory. This method is safe to call in all lifecycle of the index shard,\n+ * without having to worry about the current state of the engine and concurrent flushes.\n+ *\n+ * @throws org.apache.lucene.index.IndexNotFoundException if no index is found in the current directory\n+ * @throws CorruptIndexException if the lucene index is corrupted. This can be caused by a checksum mismatch or an\n+ * unexpected exception when opening the index reading the segments file.\n+ * @throws IndexFormatTooOldException if the lucene index is too old to be opened.\n+ * @throws IndexFormatTooNewException if the lucene index is too new to be opened.\n+ * @throws FileNotFoundException if one or more files referenced by a commit are not present.\n+ * @throws NoSuchFileException if one or more files referenced by a commit are not present.\n+ */\n+ public Store.MetadataSnapshot snapshotStoreMetadata() throws IOException {\n+ IndexCommit indexCommit = null;\n+ store.incRef();\n+ try {\n+ synchronized (mutex) {\n+ // if the engine is not running, we can access the store directly, but we need to make sure no one starts\n+ // the engine on us. 
If the engine is running, we can get a snapshot via the deletion policy which is initialized.\n+ // That can be done out of mutex, since the engine can be closed half way.\n+ Engine engine = getEngineOrNull();\n+ if (engine == null) {\n+ try (Lock ignored = store.directory().obtainLock(IndexWriter.WRITE_LOCK_NAME)) {\n+ return store.getMetadata(null);\n+ }\n+ }\n+ }\n+ indexCommit = deletionPolicy.snapshot();\n+ return store.getMetadata(indexCommit);\n+ } finally {\n+ store.decRef();\n+ if (indexCommit != null) {\n+ deletionPolicy.release(indexCommit);\n+ }\n+ }\n+ }\n+\n /**\n * Fails the shard and marks the shard store as corrupted if\n * <code>e</code> is caused by index corruption\n@@ -1310,7 +1354,7 @@ private void doCheckIndex() throws IOException {\n if (\"checksum\".equals(checkIndexOnStartup)) {\n // physical verification only: verify all checksums for the latest commit\n IOException corrupt = null;\n- MetadataSnapshot metadata = store.getMetadata();\n+ MetadataSnapshot metadata = snapshotStoreMetadata();\n for (Map.Entry<String, StoreFileMetaData> entry : metadata.asMap().entrySet()) {\n try {\n Store.checkIntegrity(entry.getValue(), store.directory());", "filename": "core/src/main/java/org/elasticsearch/index/shard/IndexShard.java", "status": "modified" }, { "diff": "@@ -23,15 +23,13 @@\n import org.apache.lucene.store.Directory;\n import org.apache.lucene.store.FilterDirectory;\n import org.apache.lucene.store.IOContext;\n-import org.apache.lucene.store.IndexInput;\n import org.apache.lucene.store.IndexOutput;\n import org.apache.lucene.store.Lock;\n import org.apache.lucene.store.NoLockFactory;\n import org.elasticsearch.cluster.metadata.MappingMetaData;\n import org.elasticsearch.common.collect.ImmutableOpenMap;\n import org.elasticsearch.index.Index;\n import org.elasticsearch.index.store.Store;\n-import org.elasticsearch.indices.recovery.RecoveryState;\n \n import java.io.Closeable;\n import java.io.IOException;\n@@ -52,7 +50,7 @@ public LocalShardSnapshot(IndexShard shard) {\n store.incRef();\n boolean success = false;\n try {\n- indexCommit = shard.snapshotIndex(true);\n+ indexCommit = shard.acquireIndexCommit(true);\n success = true;\n } finally {\n if (success == false) {\n@@ -120,7 +118,7 @@ public void close() throws IOException {\n public void close() throws IOException {\n if (closed.compareAndSet(false, true)) {\n try {\n- shard.releaseSnapshot(indexCommit);\n+ shard.releaseIndexCommit(indexCommit);\n } finally {\n store.decRef();\n }", "filename": "core/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java", "status": "modified" }, { "diff": "@@ -109,4 +109,9 @@ public TranslogStats translogStats() {\n public void addRefreshListener(Translog.Location location, Consumer<Boolean> listener) {\n throw new UnsupportedOperationException(\"Can't listen for a refresh on a shadow engine because it doesn't have a translog\");\n }\n+\n+ @Override\n+ public Store.MetadataSnapshot snapshotStoreMetadata() throws IOException {\n+ throw new UnsupportedOperationException(\"can't snapshot the directory as the primary may change it underneath us\");\n+ }\n }", "filename": "core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java", "status": "modified" }, { "diff": "@@ -73,6 +73,7 @@\n import org.elasticsearch.index.IndexSettings;\n import org.elasticsearch.index.engine.Engine;\n import org.elasticsearch.index.shard.AbstractIndexShardComponent;\n+import org.elasticsearch.index.shard.IndexShard;\n import org.elasticsearch.index.shard.ShardId;\n \n import 
java.io.Closeable;\n@@ -208,45 +209,17 @@ final void ensureOpen() {\n }\n }\n \n- /**\n- * Returns a new MetadataSnapshot for the latest commit in this store or\n- * an empty snapshot if no index exists or can not be opened.\n- *\n- * @throws CorruptIndexException if the lucene index is corrupted. This can be caused by a checksum mismatch or an\n- * unexpected exception when opening the index reading the segments file.\n- * @throws IndexFormatTooOldException if the lucene index is too old to be opened.\n- * @throws IndexFormatTooNewException if the lucene index is too new to be opened.\n- */\n- public MetadataSnapshot getMetadataOrEmpty() throws IOException {\n- try {\n- return getMetadata(null);\n- } catch (IndexNotFoundException ex) {\n- // that's fine - happens all the time no need to log\n- } catch (FileNotFoundException | NoSuchFileException ex) {\n- logger.info(\"Failed to open / find files while reading metadata snapshot\");\n- }\n- return MetadataSnapshot.EMPTY;\n- }\n-\n- /**\n- * Returns a new MetadataSnapshot for the latest commit in this store.\n- *\n- * @throws CorruptIndexException if the lucene index is corrupted. This can be caused by a checksum mismatch or an\n- * unexpected exception when opening the index reading the segments file.\n- * @throws IndexFormatTooOldException if the lucene index is too old to be opened.\n- * @throws IndexFormatTooNewException if the lucene index is too new to be opened.\n- * @throws FileNotFoundException if one or more files referenced by a commit are not present.\n- * @throws NoSuchFileException if one or more files referenced by a commit are not present.\n- * @throws IndexNotFoundException if no index / valid commit-point can be found in this store\n- */\n- public MetadataSnapshot getMetadata() throws IOException {\n- return getMetadata(null);\n- }\n-\n /**\n * Returns a new MetadataSnapshot for the given commit. If the given commit is <code>null</code>\n * the latest commit point is used.\n *\n+ * Note that this method requires the caller verify it has the right to access the store and\n+ * no concurrent file changes are happening. If in doubt, you probably want to use one of the following:\n+ *\n+ * {@link #readMetadataSnapshot(Path, ShardId, NodeEnvironment.ShardLocker, ESLogger)} to read a meta data while locking\n+ * {@link IndexShard#snapshotStoreMetadata()} to safely read from an existing shard\n+ * {@link IndexShard#acquireIndexCommit(boolean)} to get an {@link IndexCommit} which is safe to use but has to be freed\n+ *\n * @throws CorruptIndexException if the lucene index is corrupted. 
This can be caused by a checksum mismatch or an\n * unexpected exception when opening the index reading the segments file.\n * @throws IndexFormatTooOldException if the lucene index is too old to be opened.\n@@ -634,7 +607,7 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) thr\n // ignore, we don't really care, will get deleted later on\n }\n }\n- final Store.MetadataSnapshot metadataOrEmpty = getMetadata();\n+ final Store.MetadataSnapshot metadataOrEmpty = getMetadata(null);\n verifyAfterCleanup(sourceMetaData, metadataOrEmpty);\n } finally {\n metadataLock.writeLock().unlock();", "filename": "core/src/main/java/org/elasticsearch/index/store/Store.java", "status": "modified" }, { "diff": "@@ -127,7 +127,7 @@ public RecoveryResponse recoverToTarget() throws IOException {\n logger.trace(\"captured translog id [{}] for recovery\", translogView.minTranslogGeneration());\n final IndexCommit phase1Snapshot;\n try {\n- phase1Snapshot = shard.snapshotIndex(false);\n+ phase1Snapshot = shard.acquireIndexCommit(false);\n } catch (Exception e) {\n IOUtils.closeWhileHandlingException(translogView);\n throw new RecoveryEngineException(shard.shardId(), 1, \"Snapshot failed\", e);\n@@ -139,7 +139,7 @@ public RecoveryResponse recoverToTarget() throws IOException {\n throw new RecoveryEngineException(shard.shardId(), 1, \"phase1 failed\", e);\n } finally {\n try {\n- shard.releaseSnapshot(phase1Snapshot);\n+ shard.releaseIndexCommit(phase1Snapshot);\n } catch (IOException ex) {\n logger.warn(\"releasing snapshot caused exception\", ex);\n }", "filename": "core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java", "status": "modified" }, { "diff": "@@ -167,7 +167,13 @@ private void doRecovery(final RecoveryTarget recoveryTarget) {\n logger.trace(\"collecting local files for {}\", recoveryTarget);\n Store.MetadataSnapshot metadataSnapshot = null;\n try {\n- metadataSnapshot = recoveryTarget.store().getMetadataOrEmpty();\n+ if (recoveryTarget.indexShard().indexSettings().isOnSharedFilesystem()) {\n+ // we are not going to copy any files, so don't bother listing files, potentially running\n+ // into concurrency issues with the primary changing files underneath us.\n+ metadataSnapshot = Store.MetadataSnapshot.EMPTY;\n+ } else {\n+ metadataSnapshot = recoveryTarget.indexShard().snapshotStoreMetadata();\n+ }\n } catch (IOException e) {\n logger.warn(\"error while listing local files, recover as if there are none\", e);\n metadataSnapshot = Store.MetadataSnapshot.EMPTY;\n@@ -178,6 +184,7 @@ private void doRecovery(final RecoveryTarget recoveryTarget) {\n new RecoveryFailedException(recoveryTarget.state(), \"failed to list local files\", e), true);\n return;\n }\n+ logger.trace(\"{} local file count: [{}]\", recoveryTarget, metadataSnapshot.size());\n final StartRecoveryRequest request = new StartRecoveryRequest(recoveryTarget.shardId(), recoveryTarget.sourceNode(),\n clusterService.localNode(),\n metadataSnapshot, recoveryTarget.state().getType(), recoveryTarget.recoveryId());", "filename": "core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java", "status": "modified" }, { "diff": "@@ -123,14 +123,8 @@ private StoreFilesMetaData listStoreMetaData(ShardId shardId) throws IOException\n if (indexService != null) {\n IndexShard indexShard = indexService.getShardOrNull(shardId.id());\n if (indexShard != null) {\n- final Store store = indexShard.store();\n- store.incRef();\n- try {\n- exists = true;\n- return new StoreFilesMetaData(shardId, 
store.getMetadataOrEmpty());\n- } finally {\n- store.decRef();\n- }\n+ exists = true;\n+ return new StoreFilesMetaData(shardId, indexShard.snapshotStoreMetadata());\n }\n }\n // try and see if we an list unallocated", "filename": "core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java", "status": "modified" }, { "diff": "@@ -23,14 +23,12 @@\n import org.elasticsearch.cluster.metadata.MetaData;\n import org.elasticsearch.cluster.metadata.RepositoryMetaData;\n import org.elasticsearch.cluster.node.DiscoveryNode;\n-import org.elasticsearch.common.settings.Settings;\n-import org.elasticsearch.env.Environment;\n-import org.elasticsearch.index.shard.IndexShard;\n-import org.elasticsearch.indices.recovery.RecoveryState;\n-import org.elasticsearch.snapshots.SnapshotId;\n import org.elasticsearch.common.component.LifecycleComponent;\n+import org.elasticsearch.index.shard.IndexShard;\n import org.elasticsearch.index.shard.ShardId;\n import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;\n+import org.elasticsearch.indices.recovery.RecoveryState;\n+import org.elasticsearch.snapshots.SnapshotId;\n import org.elasticsearch.snapshots.SnapshotInfo;\n import org.elasticsearch.snapshots.SnapshotShardFailure;\n \n@@ -174,7 +172,7 @@ interface Factory {\n /**\n * Creates a snapshot of the shard based on the index commit point.\n * <p>\n- * The index commit point can be obtained by using {@link org.elasticsearch.index.engine.Engine#snapshotIndex} method.\n+ * The index commit point can be obtained by using {@link org.elasticsearch.index.engine.Engine#acquireIndexCommit} method.\n * Repository implementations shouldn't release the snapshot index commit point. It is done by the method caller.\n * <p>\n * As snapshot process progresses, implementation of this method should update {@link IndexShardSnapshotStatus} object and check", "filename": "core/src/main/java/org/elasticsearch/repositories/Repository.java", "status": "modified" }, { "diff": "@@ -23,6 +23,7 @@\n import org.apache.lucene.index.IndexCommit;\n import org.apache.lucene.index.IndexFormatTooNewException;\n import org.apache.lucene.index.IndexFormatTooOldException;\n+import org.apache.lucene.index.IndexNotFoundException;\n import org.apache.lucene.index.IndexWriter;\n import org.apache.lucene.index.IndexWriterConfig;\n import org.apache.lucene.index.SegmentInfos;\n@@ -40,29 +41,6 @@\n import org.elasticsearch.cluster.metadata.RepositoryMetaData;\n import org.elasticsearch.cluster.node.DiscoveryNode;\n import org.elasticsearch.common.Numbers;\n-import org.elasticsearch.common.collect.Tuple;\n-import org.elasticsearch.common.lucene.Lucene;\n-import org.elasticsearch.common.lucene.store.InputStreamIndexInput;\n-import org.elasticsearch.common.settings.Settings;\n-import org.elasticsearch.common.util.iterable.Iterables;\n-import org.elasticsearch.common.util.set.Sets;\n-import org.elasticsearch.common.xcontent.ToXContent;\n-import org.elasticsearch.index.shard.IndexShard;\n-import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException;\n-import org.elasticsearch.index.snapshots.IndexShardSnapshotException;\n-import org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException;\n-import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;\n-import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot;\n-import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots;\n-import 
org.elasticsearch.index.snapshots.blobstore.RateLimitingInputStream;\n-import org.elasticsearch.index.snapshots.blobstore.SlicedInputStream;\n-import org.elasticsearch.index.snapshots.blobstore.SnapshotFiles;\n-import org.elasticsearch.index.store.Store;\n-import org.elasticsearch.index.store.StoreFileMetaData;\n-import org.elasticsearch.indices.recovery.RecoveryState;\n-import org.elasticsearch.repositories.IndexId;\n-import org.elasticsearch.repositories.RepositoryData;\n-import org.elasticsearch.snapshots.SnapshotId;\n import org.elasticsearch.common.ParseFieldMatcher;\n import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.UUIDs;\n@@ -72,26 +50,49 @@\n import org.elasticsearch.common.blobstore.BlobStore;\n import org.elasticsearch.common.bytes.BytesArray;\n import org.elasticsearch.common.bytes.BytesReference;\n+import org.elasticsearch.common.collect.Tuple;\n import org.elasticsearch.common.component.AbstractLifecycleComponent;\n import org.elasticsearch.common.compress.NotXContentException;\n import org.elasticsearch.common.io.Streams;\n import org.elasticsearch.common.io.stream.BytesStreamOutput;\n import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n+import org.elasticsearch.common.lucene.Lucene;\n+import org.elasticsearch.common.lucene.store.InputStreamIndexInput;\n import org.elasticsearch.common.metrics.CounterMetric;\n+import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.unit.ByteSizeUnit;\n import org.elasticsearch.common.unit.ByteSizeValue;\n+import org.elasticsearch.common.util.iterable.Iterables;\n+import org.elasticsearch.common.util.set.Sets;\n+import org.elasticsearch.common.xcontent.ToXContent;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.common.xcontent.XContentHelper;\n import org.elasticsearch.common.xcontent.XContentParser;\n import org.elasticsearch.common.xcontent.XContentType;\n+import org.elasticsearch.index.shard.IndexShard;\n import org.elasticsearch.index.shard.ShardId;\n+import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException;\n+import org.elasticsearch.index.snapshots.IndexShardSnapshotException;\n+import org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException;\n+import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;\n+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot;\n+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots;\n+import org.elasticsearch.index.snapshots.blobstore.RateLimitingInputStream;\n+import org.elasticsearch.index.snapshots.blobstore.SlicedInputStream;\n+import org.elasticsearch.index.snapshots.blobstore.SnapshotFiles;\n+import org.elasticsearch.index.store.Store;\n+import org.elasticsearch.index.store.StoreFileMetaData;\n+import org.elasticsearch.indices.recovery.RecoveryState;\n+import org.elasticsearch.repositories.IndexId;\n import org.elasticsearch.repositories.Repository;\n+import org.elasticsearch.repositories.RepositoryData;\n import org.elasticsearch.repositories.RepositoryException;\n import org.elasticsearch.repositories.RepositoryVerificationException;\n import org.elasticsearch.snapshots.SnapshotCreationException;\n import org.elasticsearch.snapshots.SnapshotException;\n+import org.elasticsearch.snapshots.SnapshotId;\n import org.elasticsearch.snapshots.SnapshotInfo;\n import 
org.elasticsearch.snapshots.SnapshotMissingException;\n import org.elasticsearch.snapshots.SnapshotShardFailure;\n@@ -1444,7 +1445,7 @@ protected InputStream openSlice(long slice) throws IOException {\n */\n private class RestoreContext extends Context {\n \n- private final Store store;\n+ private final IndexShard targetShard;\n \n private final RecoveryState recoveryState;\n \n@@ -1460,13 +1461,14 @@ private class RestoreContext extends Context {\n public RestoreContext(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) {\n super(snapshotId, version, indexId, shard.shardId(), snapshotShardId);\n this.recoveryState = recoveryState;\n- store = shard.store();\n+ this.targetShard = shard;\n }\n \n /**\n * Performs restore operation\n */\n public void restore() throws IOException {\n+ final Store store = targetShard.store();\n store.incRef();\n try {\n logger.debug(\"[{}] [{}] restoring to [{}] ...\", snapshotId, metadata.name(), shardId);\n@@ -1491,12 +1493,16 @@ public void restore() throws IOException {\n }\n \n SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles());\n- final Store.MetadataSnapshot recoveryTargetMetadata;\n+ Store.MetadataSnapshot recoveryTargetMetadata;\n try {\n- recoveryTargetMetadata = store.getMetadataOrEmpty();\n- } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException e) {\n- logger.warn(\"{} Can't read metadata from store\", e, shardId);\n- throw new IndexShardRestoreFailedException(shardId, \"Can't restore corrupted shard\", e);\n+ recoveryTargetMetadata = targetShard.snapshotStoreMetadata();\n+ } catch (IndexNotFoundException e) {\n+ // happens when restore to an empty shard, not a big deal\n+ logger.trace(\"[{}] [{}] restoring from to an empty shard\", shardId, snapshotId);\n+ recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY;\n+ } catch (IOException e) {\n+ logger.warn(\"{} Can't read metadata from store, will not reuse any local file while restoring\", e, shardId);\n+ recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY;\n }\n \n final List<BlobStoreIndexShardSnapshot.FileInfo> filesToRecover = new ArrayList<>();\n@@ -1550,7 +1556,7 @@ public void restore() throws IOException {\n try {\n for (final BlobStoreIndexShardSnapshot.FileInfo fileToRecover : filesToRecover) {\n logger.trace(\"[{}] [{}] restoring file [{}]\", shardId, snapshotId, fileToRecover.name());\n- restoreFile(fileToRecover);\n+ restoreFile(fileToRecover, store);\n }\n } catch (IOException ex) {\n throw new IndexShardRestoreFailedException(shardId, \"Failed to recover index\", ex);\n@@ -1597,7 +1603,7 @@ public void restore() throws IOException {\n *\n * @param fileInfo file to be restored\n */\n- private void restoreFile(final BlobStoreIndexShardSnapshot.FileInfo fileInfo) throws IOException {\n+ private void restoreFile(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, final Store store) throws IOException {\n boolean success = false;\n \n try (InputStream partSliceStream = new PartSliceStream(blobContainer, fileInfo)) {", "filename": "core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java", "status": "modified" }, { "diff": "@@ -348,7 +348,7 @@ private void snapshot(final IndexShard indexShard, final Snapshot snapshot, fina\n \n try {\n // we flush first to make sure we get the latest writes snapshotted\n- IndexCommit snapshotIndexCommit = indexShard.snapshotIndex(true);\n+ IndexCommit snapshotIndexCommit = 
indexShard.acquireIndexCommit(true);\n try {\n repository.snapshotShard(indexShard, snapshot.getSnapshotId(), indexId, snapshotIndexCommit, snapshotStatus);\n if (logger.isDebugEnabled()) {\n@@ -358,7 +358,7 @@ private void snapshot(final IndexShard indexShard, final Snapshot snapshot, fina\n TimeValue.timeValueMillis(snapshotStatus.time()), sb);\n }\n } finally {\n- indexShard.releaseSnapshot(snapshotIndexCommit);\n+ indexShard.releaseIndexCommit(snapshotIndexCommit);\n }\n } catch (SnapshotFailedEngineException e) {\n throw e;", "filename": "core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java", "status": "modified" }, { "diff": "@@ -19,6 +19,7 @@\n package org.elasticsearch.index.replication;\n \n import org.apache.lucene.document.Document;\n+import org.apache.lucene.index.IndexNotFoundException;\n import org.apache.lucene.index.LeafReader;\n import org.apache.lucene.index.LeafReaderContext;\n import org.apache.lucene.store.AlreadyClosedException;\n@@ -299,14 +300,28 @@ public void recoverReplica(IndexShard replica, BiFunction<IndexShard, DiscoveryN\n replica.prepareForIndexRecovery();\n RecoveryTarget recoveryTarget = targetSupplier.apply(replica, pNode);\n StartRecoveryRequest request = new StartRecoveryRequest(replica.shardId(), pNode, rNode,\n- replica.store().getMetadataOrEmpty(), RecoveryState.Type.REPLICA, 0);\n+ getMetadataSnapshotOrEmpty(replica), RecoveryState.Type.REPLICA, 0);\n RecoverySourceHandler recovery = new RecoverySourceHandler(primary, recoveryTarget, request, () -> 0L, e -> () -> {},\n (int) ByteSizeUnit.MB.toKB(1), logger);\n recovery.recoverToTarget();\n recoveryTarget.markAsDone();\n replica.updateRoutingEntry(ShardRoutingHelper.moveToStarted(replica.routingEntry()));\n }\n \n+ private Store.MetadataSnapshot getMetadataSnapshotOrEmpty(IndexShard replica) throws IOException {\n+ Store.MetadataSnapshot result;\n+ try {\n+ result = replica.snapshotStoreMetadata();\n+ } catch (IndexNotFoundException e) {\n+ // OK!\n+ result = Store.MetadataSnapshot.EMPTY;\n+ } catch (IOException e) {\n+ logger.warn(\"{} failed read store, treating as empty\", e);\n+ result = Store.MetadataSnapshot.EMPTY;\n+ }\n+ return result;\n+ }\n+\n public synchronized DiscoveryNode getPrimaryNode() {\n return getDiscoveryNode(primary.routingEntry().currentNodeId());\n }", "filename": "core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java", "status": "modified" }, { "diff": "@@ -23,6 +23,7 @@\n import org.apache.lucene.index.CorruptIndexException;\n import org.apache.lucene.index.DirectoryReader;\n import org.apache.lucene.index.IndexCommit;\n+import org.apache.lucene.index.IndexReader;\n import org.apache.lucene.index.Term;\n import org.apache.lucene.search.IndexSearcher;\n import org.apache.lucene.search.TermQuery;\n@@ -156,6 +157,7 @@\n import static org.hamcrest.Matchers.containsString;\n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.greaterThan;\n+import static org.hamcrest.Matchers.hasSize;\n \n /**\n * Simple unit-test IndexShard related operations.\n@@ -476,6 +478,76 @@ public static void write(ShardStateMetaData shardStateMetaData,\n ShardStateMetaData.FORMAT.write(shardStateMetaData, shardPaths);\n }\n \n+ public void testAcquireIndexCommit() throws IOException {\n+ createIndex(\"test\");\n+ ensureGreen();\n+ IndicesService indicesService = getInstanceFromNode(IndicesService.class);\n+ IndexService test = indicesService.indexService(resolveIndex(\"test\"));\n+ final IndexShard shard = 
test.getShardOrNull(0);\n+ int numDocs = randomInt(20);\n+ for (int i = 0; i < numDocs; i++) {\n+ client().prepareIndex(\"test\", \"type\", \"id_\" + i).setSource(\"{}\").get();\n+ }\n+ final boolean flushFirst = randomBoolean();\n+ IndexCommit commit = shard.acquireIndexCommit(flushFirst);\n+ int moreDocs = randomInt(20);\n+ for (int i = 0; i < moreDocs; i++) {\n+ client().prepareIndex(\"test\", \"type\", \"id_\" + numDocs + i).setSource(\"{}\").get();\n+ }\n+ shard.flush(new FlushRequest(\"index\"));\n+ // check that we can still read the commit that we captured\n+ try (IndexReader reader = DirectoryReader.open(commit)) {\n+ assertThat(reader.numDocs(), equalTo(flushFirst ? numDocs : 0));\n+ }\n+ shard.releaseIndexCommit(commit);\n+ shard.flush(new FlushRequest(\"index\").force(true));\n+ // check it's clean up\n+ assertThat(DirectoryReader.listCommits(shard.store().directory()), hasSize(1));\n+ }\n+\n+ /***\n+ * test one can snapshot the store at various lifecycle stages\n+ */\n+ public void testSnapshotStore() throws IOException {\n+ createIndex(\"test\");\n+ ensureGreen();\n+ IndicesService indicesService = getInstanceFromNode(IndicesService.class);\n+ IndexService test = indicesService.indexService(resolveIndex(\"test\"));\n+ final IndexShard shard = test.getShardOrNull(0);\n+ client().prepareIndex(\"test\", \"test\", \"0\").setSource(\"{}\").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();\n+ client().admin().indices().prepareFlush().get();\n+ ShardRouting routing = shard.routingEntry();\n+ test.removeShard(0, \"b/c simon says so\");\n+ routing = ShardRoutingHelper.reinit(routing);\n+ IndexShard newShard = test.createShard(routing);\n+ newShard.updateRoutingEntry(routing);\n+ DiscoveryNode localNode = new DiscoveryNode(\"foo\", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);\n+\n+ Store.MetadataSnapshot snapshot = newShard.snapshotStoreMetadata();\n+ assertThat(snapshot.getSegmentsFile().name(), equalTo(\"segments_2\"));\n+\n+ newShard.markAsRecovering(\"store\", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.STORE, localNode,\n+ localNode));\n+\n+ snapshot = newShard.snapshotStoreMetadata();\n+ assertThat(snapshot.getSegmentsFile().name(), equalTo(\"segments_2\"));\n+\n+ assertTrue(newShard.recoverFromStore());\n+\n+ snapshot = newShard.snapshotStoreMetadata();\n+ assertThat(snapshot.getSegmentsFile().name(), equalTo(\"segments_2\"));\n+\n+ newShard.updateRoutingEntry(getInitializingShardRouting(routing).moveToStarted());\n+\n+ snapshot = newShard.snapshotStoreMetadata();\n+ assertThat(snapshot.getSegmentsFile().name(), equalTo(\"segments_2\"));\n+\n+ newShard.close(\"test\", false);\n+\n+ snapshot = newShard.snapshotStoreMetadata();\n+ assertThat(snapshot.getSegmentsFile().name(), equalTo(\"segments_2\"));\n+ }\n+\n public void testDurableFlagHasEffect() {\n createIndex(\"test\");\n ensureGreen();", "filename": "core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java", "status": "modified" }, { "diff": "@@ -328,15 +328,14 @@ public void testNewChecksums() throws IOException {\n Store.MetadataSnapshot metadata;\n // check before we committed\n try {\n- store.getMetadata();\n+ store.getMetadata(null);\n fail(\"no index present - expected exception\");\n } catch (IndexNotFoundException ex) {\n // expected\n }\n- assertThat(store.getMetadataOrEmpty(), is(Store.MetadataSnapshot.EMPTY)); // nothing committed\n writer.commit();\n writer.close();\n- metadata = store.getMetadata();\n+ metadata = 
store.getMetadata(null);\n assertThat(metadata.asMap().isEmpty(), is(false));\n for (StoreFileMetaData meta : metadata) {\n try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {\n@@ -579,7 +578,7 @@ public void testRecoveryDiff() throws IOException, InterruptedException {\n }\n writer.commit();\n writer.close();\n- first = store.getMetadata();\n+ first = store.getMetadata(null);\n assertDeleteContent(store, directoryService);\n store.close();\n }\n@@ -609,7 +608,7 @@ public void testRecoveryDiff() throws IOException, InterruptedException {\n }\n writer.commit();\n writer.close();\n- second = store.getMetadata();\n+ second = store.getMetadata(null);\n }\n Store.RecoveryDiff diff = first.recoveryDiff(second);\n assertThat(first.size(), equalTo(second.size()));\n@@ -639,7 +638,7 @@ public void testRecoveryDiff() throws IOException, InterruptedException {\n writer.deleteDocuments(new Term(\"id\", Integer.toString(random().nextInt(numDocs))));\n writer.commit();\n writer.close();\n- Store.MetadataSnapshot metadata = store.getMetadata();\n+ Store.MetadataSnapshot metadata = store.getMetadata(null);\n StoreFileMetaData delFile = null;\n for (StoreFileMetaData md : metadata) {\n if (md.name().endsWith(\".liv\")) {\n@@ -674,7 +673,7 @@ public void testRecoveryDiff() throws IOException, InterruptedException {\n writer.addDocument(docs.get(0));\n writer.close();\n \n- Store.MetadataSnapshot newCommitMetaData = store.getMetadata();\n+ Store.MetadataSnapshot newCommitMetaData = store.getMetadata(null);\n Store.RecoveryDiff newCommitDiff = newCommitMetaData.recoveryDiff(metadata);\n if (delFile != null) {\n assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetaData.size() - 5)); // segments_N, del file, cfs, cfe, si for the new segment\n@@ -723,7 +722,7 @@ public void testCleanupFromSnapshot() throws IOException {\n writer.addDocument(doc);\n }\n \n- Store.MetadataSnapshot firstMeta = store.getMetadata();\n+ Store.MetadataSnapshot firstMeta = store.getMetadata(null);\n \n if (random().nextBoolean()) {\n for (int i = 0; i < docs; i++) {\n@@ -738,7 +737,7 @@ public void testCleanupFromSnapshot() throws IOException {\n writer.commit();\n writer.close();\n \n- Store.MetadataSnapshot secondMeta = store.getMetadata();\n+ Store.MetadataSnapshot secondMeta = store.getMetadata(null);\n \n \n if (randomBoolean()) {\n@@ -785,13 +784,10 @@ public void testOnCloseCallback() throws IOException {\n final AtomicInteger count = new AtomicInteger(0);\n final ShardLock lock = new DummyShardLock(shardId);\n \n- Store store = new Store(shardId, INDEX_SETTINGS, directoryService, lock, new Store.OnClose() {\n- @Override\n- public void handle(ShardLock theLock) {\n- assertEquals(shardId, theLock.getShardId());\n- assertEquals(lock, theLock);\n- count.incrementAndGet();\n- }\n+ Store store = new Store(shardId, INDEX_SETTINGS, directoryService, lock, theLock -> {\n+ assertEquals(shardId, theLock.getShardId());\n+ assertEquals(lock, theLock);\n+ count.incrementAndGet();\n });\n assertEquals(count.get(), 0);\n \n@@ -917,11 +913,7 @@ public void testUserDataRead() throws IOException {\n writer.commit();\n writer.close();\n Store.MetadataSnapshot metadata;\n- if (randomBoolean()) {\n- metadata = store.getMetadata();\n- } else {\n- metadata = store.getMetadata(deletionPolicy.snapshot());\n- }\n+ metadata = store.getMetadata(randomBoolean() ? 
null : deletionPolicy.snapshot());\n assertFalse(metadata.asMap().isEmpty());\n // do not check for correct files, we have enough tests for that above\n assertThat(metadata.getCommitUserData().get(Engine.SYNC_COMMIT_ID), equalTo(syncId));\n@@ -982,7 +974,7 @@ public void testMarkCorruptedOnTruncatedSegmentsFile() throws IOException {\n \n try {\n if (randomBoolean()) {\n- store.getMetadata();\n+ store.getMetadata(null);\n } else {\n store.readLastCommittedSegmentsInfo();\n }", "filename": "core/src/test/java/org/elasticsearch/index/store/StoreTests.java", "status": "modified" }, { "diff": "@@ -97,7 +97,9 @@ public void testSendFiles() throws Throwable {\n writer.addDocument(document);\n }\n writer.commit();\n- Store.MetadataSnapshot metadata = store.getMetadata();\n+ writer.close();\n+\n+ Store.MetadataSnapshot metadata = store.getMetadata(null);\n List<StoreFileMetaData> metas = new ArrayList<>();\n for (StoreFileMetaData md : metadata) {\n metas.add(md);\n@@ -116,14 +118,14 @@ public void close() throws IOException {\n throw new RuntimeException(e);\n }\n });\n- Store.MetadataSnapshot targetStoreMetadata = targetStore.getMetadata();\n+ Store.MetadataSnapshot targetStoreMetadata = targetStore.getMetadata(null);\n Store.RecoveryDiff recoveryDiff = targetStoreMetadata.recoveryDiff(metadata);\n assertEquals(metas.size(), recoveryDiff.identical.size());\n assertEquals(0, recoveryDiff.different.size());\n assertEquals(0, recoveryDiff.missing.size());\n IndexReader reader = DirectoryReader.open(targetStore.directory());\n assertEquals(numDocs, reader.maxDoc());\n- IOUtils.close(reader, writer, store, targetStore);\n+ IOUtils.close(reader, store, targetStore);\n }\n \n public void testHandleCorruptedIndexOnSendSendFiles() throws Throwable {\n@@ -157,7 +159,7 @@ protected void failEngine(IOException cause) {\n writer.commit();\n writer.close();\n \n- Store.MetadataSnapshot metadata = store.getMetadata();\n+ Store.MetadataSnapshot metadata = store.getMetadata(null);\n List<StoreFileMetaData> metas = new ArrayList<>();\n for (StoreFileMetaData md : metadata) {\n metas.add(md);\n@@ -221,7 +223,7 @@ protected void failEngine(IOException cause) {\n writer.commit();\n writer.close();\n \n- Store.MetadataSnapshot metadata = store.getMetadata();\n+ Store.MetadataSnapshot metadata = store.getMetadata(null);\n List<StoreFileMetaData> metas = new ArrayList<>();\n for (StoreFileMetaData md : metadata) {\n metas.add(md);", "filename": "core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java", "status": "modified" } ] }
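The diff above renames the snapshot hooks to `acquireIndexCommit`/`releaseIndexCommit` and always pairs them in a try/finally, while the new `testAcquireIndexCommit` reads the captured commit with `DirectoryReader.open`. A minimal sketch of that pattern, assuming an `IndexShard` is already available; the wrapper class and helper name are illustrative, not part of the change:

```java
import java.io.IOException;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexReader;
import org.elasticsearch.index.shard.IndexShard;

// Sketch of the acquire/read/release pattern exercised by the diff above.
class IndexCommitSketch {
    static int docsAtCommit(IndexShard indexShard, boolean flushFirst) throws IOException {
        IndexCommit commit = indexShard.acquireIndexCommit(flushFirst); // optionally flush before capturing
        try (IndexReader reader = DirectoryReader.open(commit)) {
            // the captured commit stays readable even after newer flushes create later commits
            return reader.numDocs();
        } finally {
            // hand the commit back so the deletion policy can clean it up again
            indexShard.releaseIndexCommit(commit);
        }
    }
}
```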
{ "body": "\"aggs\" accidentally got deprecated as a field name in requests, we should un-deprecate it since it is a supported parameter in search requests.\n\n```\nHTTP/1.1 500 Internal Server Error\nWarning: Deprecated field [aggs] used, expected [aggregations] instead\nContent-Type: application/json; charset=UTF-8\nContent-Encoding: gzip\nContent-Length: 556\n```\n", "comments": [ { "body": "I think it'd be nice to wait until #19509 is merged so we can assert things about headers in the REST tests.\n", "created_at": "2016-07-20T13:25:19Z" }, { "body": "Oops, I think this was me sorry. The problem is here: https://github.com/elastic/elasticsearch/blob/dec620c0b08213033dbfcacfc4b37663432a1a5c/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java#L94\n\nThe issue is that currently `ParseField` expects there to be only one acceptable name for the field and all other names are taken as deprecated. So the question here is do we want to deprecate either `aggs` or `aggregations` or do we want to add the ability to specify alternative names in `ParseField`.\n\nIf it is the latter I would suggest keeping the constructor in ParseField as is so only the first name is taken as an acceptable name and the rest are deprecated, and then add a `addAlternateName(String)` method for the few cases where we have alternative acceptable name.\n\nAs for #19509 it would be great if post that PR we could add a check to all calls made in rest tests to ensure they do not contain a warning header and if so fail the test (since most tests should not be using deprecated options). For any tests that do need to test deprecated options we could add a `accept_deprecated:true` to the YAML tests to bypass this warning header check.\n\nwdyt?\n", "created_at": "2016-07-21T09:08:52Z" }, { "body": "Actually we should also set the REST tests to have strict parsing anyway so using deprecated functionality causes an exception and the request to be rejected (unless the `accept_deprecated:true` option is set on the YAML test)\n", "created_at": "2016-07-21T10:59:18Z" }, { "body": "I opened https://github.com/elastic/elasticsearch/pull/19533 to fix this using the `ParseField` changes I mentioned above. I decided to keep `ParseField` immutable so the alternative names are passed into a constructor rather than in an add method.\n", "created_at": "2016-07-21T12:14:00Z" }, { "body": "> the request to be rejected (unless the accept_deprecated:true option is set on the YAML test)\n\nI think it might be better to call it `expect_deprecated:true` and then have the same semantics, but also fail if it _doesn't_ get a deprecation warning returned.\n", "created_at": "2016-07-28T16:04:12Z" }, { "body": "> I think it might be better to call it expect_deprecated:true and then have the same semantics, but also fail if it doesn't get a deprecation warning returned.\n\nYes, that sounds good. I'd honestly really like to make that change.\n", "created_at": "2016-07-28T16:23:53Z" } ], "number": 19504, "title": "Undeprecate \"aggs\" in search request bodies" }
{ "body": "This change adds a second ParseField for the `aggs` field in the search\nrequest so both `aggregations` and `aggs` are undeprecated allowed\nfields in the search request\n\nCloses #19504\n", "number": 19674, "review_comments": [ { "body": "would it be possible to improve indentation here?\n", "created_at": "2016-07-29T08:06:30Z" }, { "body": "? :)\n", "created_at": "2016-07-29T08:06:42Z" }, { "body": "testAggsParsing or testAggregationsParsing?\n", "created_at": "2016-07-29T08:07:24Z" } ], "title": "Undeprecates `aggs` in the search request" }
{ "commits": [ { "message": "Undeprecates `aggs` in the search request\n\nThis change adds a second ParseField for the `aggs` field in the search\nrequest so both `aggregations` and `aggs` are undeprecated allowed\nfields in the search request\n\nCloses #19504" } ], "files": [ { "diff": "@@ -91,7 +91,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ\n public static final ParseField SORT_FIELD = new ParseField(\"sort\");\n public static final ParseField TRACK_SCORES_FIELD = new ParseField(\"track_scores\");\n public static final ParseField INDICES_BOOST_FIELD = new ParseField(\"indices_boost\");\n- public static final ParseField AGGREGATIONS_FIELD = new ParseField(\"aggregations\", \"aggs\");\n+ public static final ParseField AGGREGATIONS_FIELD = new ParseField(\"aggregations\");\n+ public static final ParseField AGGS_FIELD = new ParseField(\"aggs\");\n public static final ParseField HIGHLIGHT_FIELD = new ParseField(\"highlight\");\n public static final ParseField SUGGEST_FIELD = new ParseField(\"suggest\");\n public static final ParseField RESCORE_FIELD = new ParseField(\"rescore\");\n@@ -998,7 +999,7 @@ public void parseXContent(QueryParseContext context, AggregatorParsers aggParser\n scriptFields.add(new ScriptField(context));\n }\n } else if (context.getParseFieldMatcher().match(currentFieldName, INDICES_BOOST_FIELD)) {\n- indexBoost = new ObjectFloatHashMap<String>();\n+ indexBoost = new ObjectFloatHashMap<>();\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {\n currentFieldName = parser.currentName();\n@@ -1009,7 +1010,8 @@ public void parseXContent(QueryParseContext context, AggregatorParsers aggParser\n parser.getTokenLocation());\n }\n }\n- } else if (context.getParseFieldMatcher().match(currentFieldName, AGGREGATIONS_FIELD)) {\n+ } else if (context.getParseFieldMatcher().match(currentFieldName, AGGREGATIONS_FIELD)\n+ || context.getParseFieldMatcher().match(currentFieldName, AGGS_FIELD)) {\n aggregations = aggParsers.parseAggregators(context);\n } else if (context.getParseFieldMatcher().match(currentFieldName, HIGHLIGHT_FIELD)) {\n highlightBuilder = HighlightBuilder.fromXContent(context);", "filename": "core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java", "status": "modified" }, { "diff": "@@ -545,6 +545,41 @@ public void testParseSort() throws IOException {\n }\n }\n \n+ public void testAggsParsing() throws IOException {\n+ {\n+ String restContent = \"{\\n\" + \" \" + \n+ \"\\\"aggs\\\": {\" + \n+ \" \\\"test_agg\\\": {\\n\" + \n+ \" \" + \"\\\"terms\\\" : {\\n\" + \n+ \" \\\"field\\\": \\\"foo\\\"\\n\" + \n+ \" }\\n\" + \n+ \" }\\n\" + \n+ \" }\\n\" + \n+ \"}\\n\";\n+ try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {\n+ SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(createParseContext(parser), aggParsers,\n+ suggesters);\n+ assertEquals(1, searchSourceBuilder.aggregations().count());\n+ }\n+ }\n+ {\n+ String restContent = \"{\\n\" + \n+ \" \\\"aggregations\\\": {\" + \n+ \" \\\"test_agg\\\": {\\n\" + \n+ \" \\\"terms\\\" : {\\n\" + \n+ \" \\\"field\\\": \\\"foo\\\"\\n\" + \n+ \" }\\n\" + \n+ \" }\\n\" + \n+ \" }\\n\" + \n+ \"}\\n\";\n+ try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {\n+ SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(createParseContext(parser), aggParsers,\n+ suggesters);\n+ 
assertEquals(1, searchSourceBuilder.aggregations().count());\n+ }\n+ }\n+ }\n+\n /**\n * test that we can parse the `rescore` element either as single object or as array\n */", "filename": "core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java", "status": "modified" } ] }
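The change above replaces the single `ParseField("aggregations", "aggs")`, which silently marked `aggs` as deprecated, with two independent fields that the parser matches separately. A small sketch of that contrast, assuming the `ParseField`/`ParseFieldMatcher` classes used in the diff; the class and method names below are hypothetical:

```java
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;

// With new ParseField("aggregations", "aggs") the second name triggers a deprecation
// warning; declaring two separate ParseFields keeps both spellings first-class, and the
// parser simply accepts either one, as in the SearchSourceBuilder diff above.
class AggsParseFieldSketch {
    static final ParseField AGGREGATIONS_FIELD = new ParseField("aggregations");
    static final ParseField AGGS_FIELD = new ParseField("aggs");

    static boolean isAggregationsField(ParseFieldMatcher matcher, String currentFieldName) {
        return matcher.match(currentFieldName, AGGREGATIONS_FIELD)
            || matcher.match(currentFieldName, AGGS_FIELD);
    }
}
```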
{ "body": "<!--\nGitHub is reserved for bug reports and feature requests. The best place\nto ask a general question is at the Elastic Discourse forums at\nhttps://discuss.elastic.co. If you are in fact posting a bug report or\na feature request, please include one and only one of the below blocks\nin your new issue. Note that whether you're filing a bug report or a\nfeature request, ensure that your submission is for an\n[OS that we support](https://www.elastic.co/support/matrix#show_os).\nBug reports on an OS that we do not support or feature requests\nspecific to an OS that we do not support will be closed.\n-->\n\n<!--\nIf you are filing a bug report, please remove the below feature\nrequest block and provide responses for all of the below items.\n-->\n\n**Elasticsearch version**: 2.3.3 & 2.3.4\n\n**JVM version**: what is running in Cloud now\n\n**OS version**: what is running in Cloud now\n\n**Description of the problem including expected versus actual behavior**:\nI’m trying to do some cat queries and I run into an issue that I can’t see any results when I try to use a wildcard in _cat/shards\n\n**Steps to reproduce**:\n0. Make sure you have some indices named topbeat*\n1. GET _cat/indices/topbeat\\* <-- as expected\n2. GET _cat/segments/topbeat\\* <-- as expected\n3. GET _cat/shards/topbeat\\* <-- empty response\n\nBut if I point to a single index with its full name, it does work.\n\nNot sure if this is a bug or an enhancement, the docs actually speak of \"[index pattern](https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-shards.html#cat-shards)\", so I'm leaning towards bug.\n\n**Provide logs (if relevant)**:\n", "comments": [], "number": 19634, "title": "_cat/shards should support index pattern wildcards" }
{ "body": "`/_cat/shards` does not support wildcards for indices (like `/_cat/shards/boo*`) but it should according to the documentation.\n\ncloses #19634\n", "number": 19655, "review_comments": [ { "body": "This change also affects how the cluster state api works. I guess it means that the cluster state api doesn't resolve indices? I wonder if it's correct to apply the fix here.\n", "created_at": "2016-07-28T14:05:46Z" }, { "body": "I think that the wait for status is not necessary anymore.\n", "created_at": "2016-07-28T14:06:23Z" }, { "body": "is the refresh needed here?\n", "created_at": "2016-07-28T14:08:55Z" }, { "body": "I cannot help but wonder if we should keep on adding these types of tests as REST tests. Are they really useful to language clients? Or is it just the only way that we had up until now to test the REST layer? I don't mean to block this specific change, just wondering what we should do in the future. Would be nice to unit test this stuff more, or maybe use ESRestTestCase that @nik9000 recently introduced, especially with cat apis that only return text, so no json parsing involved.\n", "created_at": "2016-07-28T14:11:33Z" }, { "body": "Yes. I changed the cluster state api so that indices are resolved for both `metadata` and `routing_table` data.\n\nBefore this change, if you have two indices like `index1` and `index2` and you retrieve the cluster state just for metadata and routing table using an index pattern like `index*`, it returns only metadata and corresponding routing tables are missing.\n\nI thought it was coherent to align the behavior.\n", "created_at": "2016-07-28T14:22:49Z" }, { "body": "No, thanks\n", "created_at": "2016-07-28T14:24:33Z" }, { "body": "interesting, seems like a related bug that we may need an issue for and new tests for?\n", "created_at": "2016-07-28T14:26:36Z" }, { "body": "That's a bigger discussion that might deserve its own issue. I don't have a strong opinion on this.\n", "created_at": "2016-07-28T14:35:28Z" } ], "title": "Add index pattern wildcards support to _cat/shards" }
{ "commits": [ { "message": "/_cat/shards should support wilcards for indices\n\ncloses #19634" }, { "message": "Remove unnecessary indices refresh and wait for green status in REST tests" } ], "files": [ { "diff": "@@ -73,6 +73,7 @@ public void doRequest(final RestRequest request, final RestChannel channel, fina\n public void processResponse(final ClusterStateResponse clusterStateResponse) {\n IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest();\n indicesStatsRequest.all();\n+ indicesStatsRequest.indices(indices);\n client.admin().indices().stats(indicesStatsRequest, new RestResponseListener<IndicesStatsResponse>(channel) {\n @Override\n public RestResponse buildResponse(IndicesStatsResponse indicesStatsResponse) throws Exception {", "filename": "core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java", "status": "modified" }, { "diff": "@@ -25,7 +25,7 @@ some bandwidth by supplying an index pattern to the end.\n \n [source,sh]\n --------------------------------------------------\n-% curl 192.168.56.20:9200/_cat/shards/wiki2\n+% curl 192.168.56.20:9200/_cat/shards/wiki*\n wiki2 0 p STARTED 197 3.2mb 192.168.56.10 Stiletto\n wiki2 1 p STARTED 205 5.9mb 192.168.56.30 Frankie Raye\n wiki2 2 p STARTED 275 7.8mb 192.168.56.20 Commander Kraken", "filename": "docs/reference/cat/shards.asciidoc", "status": "modified" }, { "diff": "@@ -69,3 +69,50 @@\n \\s*\n )\n $/\n+\n+---\n+\"Test cat indices using wildcards\":\n+\n+ - do:\n+ indices.create:\n+ index: foo\n+ body:\n+ settings:\n+ number_of_shards: \"1\"\n+ number_of_replicas: \"0\"\n+\n+ - do:\n+ indices.create:\n+ index: bar\n+ body:\n+ settings:\n+ number_of_shards: \"1\"\n+ number_of_replicas: \"0\"\n+\n+ - do:\n+ indices.create:\n+ index: baz\n+ body:\n+ settings:\n+ number_of_shards: \"1\"\n+ number_of_replicas: \"0\"\n+\n+ - do:\n+ cat.indices:\n+ index: f*\n+ v: false\n+ h: i\n+\n+ - match:\n+ $body: |\n+ /^(foo \\n?)$/\n+\n+ - do:\n+ cat.indices:\n+ index: ba*\n+ v: false\n+ h: i\n+\n+ - match:\n+ $body: |\n+ /^(ba(r|z) \\n?){2}$/", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yaml", "status": "modified" }, { "diff": "@@ -45,9 +45,6 @@\n type: type\n body: { foo: bar }\n refresh: true\n- - do:\n- cluster.health:\n- wait_for_status: green\n - do:\n cat.segments: {}\n - match:\n@@ -62,11 +59,6 @@\n settings:\n number_of_shards: \"3\"\n number_of_replicas: \"0\"\n- - do:\n- cluster.health:\n- wait_for_status: green\n- wait_for_relocating_shards: 0\n-\n - do:\n index:\n index: index2\n@@ -102,10 +94,6 @@\n number_of_shards: \"1\"\n number_of_replicas: \"0\"\n \n- - do:\n- cluster.health:\n- wait_for_status: green\n-\n - do:\n indices.close:\n index: index1\n@@ -114,3 +102,71 @@\n catch: forbidden\n cat.segments:\n index: index1\n+\n+---\n+\"Test cat segments using wildcards\":\n+\n+ - do:\n+ indices.create:\n+ index: foo\n+ body:\n+ settings:\n+ number_of_shards: \"1\"\n+ number_of_replicas: \"0\"\n+\n+ - do:\n+ index:\n+ index: foo\n+ type: type\n+ body: { test: foo }\n+ refresh: true\n+\n+ - do:\n+ indices.create:\n+ index: bar\n+ body:\n+ settings:\n+ number_of_shards: \"1\"\n+ number_of_replicas: \"0\"\n+\n+ - do:\n+ index:\n+ index: bar\n+ type: type\n+ body: { test: bar }\n+ refresh: true\n+\n+ - do:\n+ indices.create:\n+ index: baz\n+ body:\n+ settings:\n+ number_of_shards: \"1\"\n+ number_of_replicas: \"0\"\n+\n+ - do:\n+ index:\n+ index: baz\n+ type: type\n+ body: { test: baz }\n+ refresh: true\n+\n+ - do:\n+ cat.segments:\n+ index: f*\n+ v: false\n+ h: 
i\n+\n+ - match:\n+ $body: |\n+ /^(foo \\n?)$/\n+\n+ - do:\n+ cat.segments:\n+ index: ba*\n+ v: false\n+ h: i\n+\n+ - match:\n+ $body: |\n+ /^(ba(r|z) \\n?){2}$/", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/cat.segments/10_basic.yaml", "status": "modified" }, { "diff": "@@ -87,9 +87,6 @@\n number_of_shards: 5\n number_of_replicas: 0\n \n- - do:\n- cluster.health:\n- wait_for_status: green\n - do:\n indices.flush_synced:\n index: sync_id_test\n@@ -116,10 +113,6 @@\n number_of_shards: 5\n number_of_replicas: 0\n \n- - do:\n- cluster.health:\n- wait_for_status: green\n-\n - do:\n cat.shards:\n index: sync_id_no_flush_test\n@@ -153,9 +146,6 @@\n settings:\n number_of_shards: \"5\"\n number_of_replicas: \"0\"\n- - do:\n- cluster.health:\n- wait_for_relocating_shards: 0\n \n - do:\n cat.shards: {}\n@@ -179,13 +169,57 @@\n number_of_replicas: \"1\"\n shadow_replicas: true\n shared_filesystem: false\n- - do:\n- cluster.health:\n- wait_for_relocating_shards: 0\n \n - do:\n cat.shards:\n index: index3\n - match:\n $body: |\n /^(index3 \\s+ \\d \\s+ (p|s) \\s+ ((STARTED|INITIALIZING|RELOCATING) \\s+ (\\d \\s+ (\\d+|\\d+[.]\\d+)(kb|b) \\s+)? \\d{1,3}.\\d{1,3}.\\d{1,3}.\\d{1,3} \\s+ .+|UNASSIGNED \\s+) \\n?){2}$/\n+\n+---\n+\"Test cat shards using wildcards\":\n+\n+ - do:\n+ indices.create:\n+ index: foo\n+ body:\n+ settings:\n+ number_of_shards: \"1\"\n+ number_of_replicas: \"0\"\n+\n+ - do:\n+ indices.create:\n+ index: bar\n+ body:\n+ settings:\n+ number_of_shards: \"1\"\n+ number_of_replicas: \"0\"\n+\n+ - do:\n+ indices.create:\n+ index: baz\n+ body:\n+ settings:\n+ number_of_shards: \"1\"\n+ number_of_replicas: \"0\"\n+\n+ - do:\n+ cat.shards:\n+ index: f*\n+ v: false\n+ h: i\n+\n+ - match:\n+ $body: |\n+ /^(foo \\n?)$/\n+\n+ - do:\n+ cat.shards:\n+ index: ba*\n+ v: false\n+ h: i\n+\n+ - match:\n+ $body: |\n+ /^(ba(r|z) \\n?){2}$/", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml", "status": "modified" } ] }
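The functional part of the fix above is a single line in `RestShardsAction`: the stats request is now limited to the indices named on the REST request instead of unconditionally covering every index. A minimal sketch of how that request is built, assuming `indices` holds the patterns parsed from the request path (e.g. `topbeat*`); the wrapper class is illustrative only:

```java
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;

// Sketch of the scoped stats request behind /_cat/shards after the fix above.
class CatShardsStatsSketch {
    static IndicesStatsRequest scopedStatsRequest(String[] indices) {
        IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest();
        indicesStatsRequest.all();              // request all stats groups
        indicesStatsRequest.indices(indices);   // but only for the requested index patterns
        return indicesStatsRequest;
    }
}
```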
{ "body": "When I was upgraded es from 2.0 to 2.1, highlight feature stop working.\n\nI got this error message:\n\n```\n{\n \"shard\": 0,\n \"index\": \"4odevelop_4o\",\n \"node\": \"XiZaRocLQGuBwZPg58Naqw\",\n \"reason\": {\n \"type\": \"illegal_state_exception\",\n \"reason\": \"can't load global ordinals for reader of type: class org.apache.lucene.search.highlight.WeightedSpanTermExtractor$DelegatingLeafReader must be a DirectoryReader\"\n }\n}\n```\n", "comments": [ { "body": "Problem occurs when I using next search query\n\n```\n{\n \"from\": 0,\n \"size\": 20,\n \"sort\": [\n {\n \"_score\": {\n \"missing\": \"_last\",\n \"order\": \"desc\"\n }\n }\n ],\n \"highlight\": {\n \"pre_tags\": [\n \"<lukituki>\"\n ],\n \"post_tags\": [\n \"</lukituki>\"\n ],\n \"fields\": {\n \"searchText\": { }\n }\n },\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"term\": {\n \"authorizationToken\": {\n \"value\": \"1859\"\n }\n }\n },\n {\n \"term\": {\n \"deleted\": {\n \"value\": false\n }\n }\n }\n ],\n \"should\": [\n {\n \"has_child\": {\n \"type\": \"stream1boost\",\n \"score_mode\": \"sum\",\n \"query\": {\n \"function_score\": {\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"term\": {\n \"userAccountId\": {\n \"boost\": 0,\n \"value\": 1859\n }\n }\n }\n ]\n }\n },\n \"score_mode\": \"sum\",\n \"boost_mode\": \"max\",\n \"script_score\": {\n \"script\": \"doc['searchBoost'].value\"\n }\n }\n }\n }\n }\n ],\n \"minimum_should_match\": \"0\"\n }\n }\n}\n```\n", "created_at": "2015-11-25T09:24:29Z" }, { "body": "@lukapor, I edited your messages to apply code formatting.\n\nThis certainly looks like a bug. It'd be super helpful if you could make a gist that recreates this against an empty index using [curl](https://www.elastic.co/help). It'd make reproducing the issue locally super easy.\n", "created_at": "2015-11-25T14:17:18Z" }, { "body": "Hello,\nunfortunately I do not have script for the generation the index, whereas it creates programming. An easy way to tell just where the problem is.\nI have stream1 object that has a child object stream1boost.\nWhen I search from stream1 one of sort is by most used. There i use child function score. If this is used (\"should\": [{\"has_child\": { ...) the problem occours otherwise not.\n\nI will try to generate script to reproduce the bug\n", "created_at": "2015-11-25T14:50:01Z" }, { "body": "I attached script to create index mapping, insert data and then query them. 
On query request will\n[insertData.txt](https://github.com/elastic/elasticsearch/files/44438/insertData.txt)\n[mapping.txt](https://github.com/elastic/elasticsearch/files/44436/mapping.txt)\n[query.txt](https://github.com/elastic/elasticsearch/files/44437/query.txt)\n\n fail.\n", "created_at": "2015-11-25T21:59:45Z" }, { "body": "Reduced to the following minimal test case:\n\n```\nPUT /test1\n{\n \"mappings\": {\n \"stream1\": {\n },\n \"stream1boost\": {\n \"_parent\": {\n \"type\": \"stream1\"\n }\n }\n }\n}\n\nPUT /test1/stream1/1\n{\n \"searchText\": \"stream1\"\n}\n\n\nPUT /test1/stream1boost/1?parent=1\n{\n \"searchText\": \"stream1\",\n \"searchBoost\": 1,\n \"userId\": 1\n}\n\n\nPOST /test1/stream1/_search\n{\n \"from\": 0,\n \"size\": 20,\n \"highlight\": {\n \"fields\": {\n \"searchText\": { }\n }\n },\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"has_child\": {\n \"type\": \"stream1boost\",\n \"query\": {\n \"match_all\": {}\n }\n }\n }\n ]\n }\n }\n}\n```\n", "created_at": "2015-11-28T17:45:02Z" }, { "body": "@martijnvg could you take a look please\n", "created_at": "2015-11-28T17:45:35Z" }, { "body": "The problem here is that the parent/child queries since 2.0 require a top level reader is used. During highlighting we re-execute the query for each hit using the leaf reader the hit was found in. The parent/child queries refuse to work with this now. Before 2.0 highlighting wouldn't have worked all the time with parent/child queries as the child hit maybe in a different leaf reader (segment) then the parent hit.\n\nThe right way for highlighting in this case would be to use `inner_hits` and move the highlighting part from the top level to the inner hits part:\n\n```\nPOST /test1/stream1/_search\n{\n \"from\": 0,\n \"size\": 20,\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"has_child\": {\n \"type\": \"stream1boost\",\n \"query\": {\n \"match\": {\n \"searchText\": \"stream1\"\n }\n },\n \"inner_hits\": {\n \"highlight\": {\n \"fields\": {\n \"searchText\": {}\n }\n }\n }\n }\n }\n ]\n }\n }\n}\n```\n\nI think that instead of throwing an error highlighting shouldn't try extract terms from `has_child` or `has_parent`, so that if these queries just happen to be part of a bigger query other highlights do get returned in the response.\n", "created_at": "2016-01-27T13:39:37Z" }, { "body": "I can tell you only that the query works in es2.0, it stop working with version 2.1\n\nMy use case is next\nstream1 holds searchText propertie\nstream1boost is user boost for specific stream (stream1 has multiple stream1boost or none), stream1boost has only searchBoost propertie (weight)\n\nSo I am searching for stream with some prefix query with different sorts (by name, by most used, ..). Results that I want are stream1 and their highlights. When most used sort is selected I use has_child should query with function_score, that calculate score of current streams. \n", "created_at": "2016-01-27T14:37:34Z" }, { "body": "> I think that instead of throwing an error highlighting shouldn't try extract terms from has_child or has_parent, so that if these queries just happen to be part of a bigger query other highlights do get returned in the response.\n\nMakes sense. Without inner hits, you wouldn't expect docs matching a has_child or has_parent query to be returned anyway, so there shouldn't be any highlighting on these docs.\n", "created_at": "2016-01-28T10:08:09Z" }, { "body": "I have the same problem with 2.2. In my case, I need highlighting for the parent and the child. 
If I remove the top-level highlighting then it works fine.\n\nDoes not work\n\n``` js\n{\n query: {\n bool: {\n should: [\n {\n query_string: 'google'\n },\n {\n has_child: {\n type: 'child_doc',\n score_mode: 'max',\n query: {\n query_string: 'google'\n },\n inner_hits: {\n highlight: {\n order: 'score',\n fields: {\n title: { number_of_fragments: 0 },\n body: { number_of_fragments: 3 }\n }\n },\n from: 0,\n size: 1\n }\n }\n }\n ]\n }\n },\n highlight: {\n order: 'score',\n fields: {\n description: { number_of_fragments: 0 }\n }\n }\n}\n```\n\nDoes work, but no highlighting on parent document\n\n``` js\n{\n query: {\n bool: {\n should: [\n {\n query_string: 'google'\n },\n {\n has_child: {\n type: 'child_doc',\n score_mode: 'max',\n query: {\n query_string: 'google'\n },\n inner_hits: {\n highlight: {\n order: 'score',\n fields: {\n title: { number_of_fragments: 0 },\n body: { number_of_fragments: 3 }\n }\n },\n from: 0,\n size: 1\n }\n }\n }\n ]\n }\n }\n}\n```\n", "created_at": "2016-02-19T00:12:40Z" }, { "body": "I'm having the same results as @rpedela (using ES 2.1). Any fixes?\n", "created_at": "2016-03-04T11:01:38Z" }, { "body": "> I'm having the same results as @rpedela (using ES 2.1). Any fixes?\n\nI'd try using a highlight_query element that doesn't include parent/child.\n", "created_at": "2016-03-04T13:35:26Z" }, { "body": "This thing: https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-highlighting.html#_highlight_query\n", "created_at": "2016-03-04T13:35:46Z" }, { "body": "So the search request body should look like this:\n\n```\n{\n query: {\n bool: {\n should: [\n {\n query_string: 'google'\n },\n {\n has_child: {\n type: 'child_doc',\n score_mode: 'max',\n query: {\n query_string: 'google'\n },\n inner_hits: {\n highlight: {\n order: 'score',\n fields: {\n title: { number_of_fragments: 0 },\n body: { number_of_fragments: 3 }\n }\n },\n from: 0,\n size: 1\n }\n }\n }\n ]\n }\n },\n highlight: {\n order: 'score',\n fields: {\n description: { number_of_fragments: 0 }\n },\n highlight_query: {\n bool: {\n should: [\n {\n query_string: 'google'\n }\n ]\n }\n }\n}\n```\n", "created_at": "2016-03-04T13:38:23Z" }, { "body": "It's still not working for me.\n\nOriginal code without highlights, working ok.\n\n```\n {\n query:\n {\n bool:\n {\n must:\n [\n {\n query_string:\n {\n fields: [ 'title', 'body'],\n query: pUserInput,\n\n }\n },\n {\n term: {\n source_id: pSourceIdInput\n }\n },\n {\n has_child:\n {\n type: \"user_item_relation\",\n query:\n {\n bool:\n {\n must:\n [\n {\n term:\n {\n user_id: pUserIdInput\n }\n }\n /*{\n term:\n {\n favorite: 1\n }\n }*/\n\n ]\n }\n }\n }\n }\n ]\n }\n }\n }\n```\n\nCode with highlights, query works ok but no highlights are shown.\n\n```\n {\n query:\n {\n bool:\n {\n must:\n [\n {\n query_string:\n {\n fields: [ 'title', 'body'],\n query: pUserInput,\n\n }\n },\n {\n term: {\n source_id: pSourceIdInput\n }\n },\n {\n has_child:\n {\n type: \"user_item_relation\",\n query:\n {\n bool:\n {\n must:\n [\n {\n term:\n {\n user_id: pUserIdInput\n }\n }\n /*{\n term:\n {\n favorite: 1\n }\n }*/\n\n ]\n }\n },\n inner_hits: {\n highlight: {\n order: 'score',\n fields: {\n title: { number_of_fragments: 0 },\n body: { number_of_fragments: 3 }\n }\n }\n }\n }\n }\n ]\n }\n },\n highlight: {\n order: 'score',\n fields: {\n title: { number_of_fragments: 0 }\n },\n highlight_query: {\n bool: {\n should: [\n {\n query_string:\n {\n fields: [ 'title', 'body'],\n query: pUserInput,\n\n }\n }\n ]\n }\n }\n }\n }\n```\n\nWhat I'm 
doing wrong here? \nThanks in advance.\n", "created_at": "2016-03-04T14:05:43Z" }, { "body": "@borjakhet You still get the same error? This should work, I checked it locally. Maybe try to run with @clintongormley minimal reproduction via Sense / curl commands? So that it is easier to see what happens?\n", "created_at": "2016-03-04T14:12:43Z" }, { "body": "It works, it was my API fault, I was reading `_source.title` instead of `highlight.title`\n", "created_at": "2016-03-04T14:20:44Z" }, { "body": "Also have this issue with 2.1.0 and using `highlight_query` only solves it.. Otherwise error is thrown. Should we wait for a bugfix? Current solution looks more like a workaround, even [docs](https://www.elastic.co/guide/en/elasticsearch/reference/2.3/search-request-highlighting.html#_highlight_query) explains the true meaning of using `highlight_query`... \n", "created_at": "2016-07-25T09:56:01Z" }, { "body": "+1\nThis was quite confusing for me as I was highlighting on parent and child documents at the same time.\n\nEssentially it seems it will work by merely including a highlight_query clause in every highlight clause, even if it's just a match all.\n\nAlso, I was using 2.3 and thought it would be fixed by now, seeing all the references to 2.1. Looks like it won't come till 2.4.\n", "created_at": "2016-07-29T21:52:51Z" } ], "number": 14999, "title": "Highlight not working in elasticsearch 2.1" }
{ "body": "The plain highlighter fails when it tries to select the fragments based on a query containing either a `has_child` or `has_parent` query.\n\nThe plain highlighter should just ignore parent/child queries as it makes no sense to highlight a parent match with a `has_child` as the child documents are not available at highlight time. Instead if child document should be highlighted inner hits should be used.\n\nParent/child queries already have no effect when the `fvh` or `postings` highligher is used. The test added in this PR verifies that.\n\nPR for #14999\n", "number": 19616, "review_comments": [ { "body": "Especially now that this is public, can it please have some javadoc explaining what it does at all? \n", "created_at": "2016-07-27T09:59:40Z" }, { "body": "added: d3c83c707c5594b08037f282a4f9928cc72d7036\n", "created_at": "2016-07-27T10:31:48Z" } ], "title": "Plain highlighter should ignore parent/child queries" }
{ "commits": [ { "message": "Plain highlighter should ignore parent/child queries.\n\nThe plain highligher fails when it tries to select the fragments based on a query containing either a `has_child` or `has_parent` query.\n\nThe plain highligher should just ignore parent/child queries as it makes no sense to highligh a parent match with a has_child as the child documents are not available at highlight time. Instead if child document should be highlighed inner hits should be used.\n\nParent/child queries already have no effect when the `fvh` or `postings` highligher is used. The test added in this commit verifies that.\n\nCloses #14999" } ], "files": [ { "diff": "@@ -352,7 +352,17 @@ protected Query doToQuery(QueryShardContext context) throws IOException {\n parentType, scoreMode, parentChildIndexFieldData, context.getSearchSimilarity());\n }\n \n- static final class LateParsingQuery extends Query {\n+ /**\n+ * A query that rewrites into another query using\n+ * {@link JoinUtil#createJoinQuery(String, Query, Query, IndexSearcher, ScoreMode, MultiDocValues.OrdinalMap, int, int)}\n+ * that executes the actual join.\n+ *\n+ * This query is exclusively used by the {@link HasChildQueryBuilder} and {@link HasParentQueryBuilder} to get access\n+ * to the {@link DirectoryReader} used by the current search in order to retrieve the {@link MultiDocValues.OrdinalMap}.\n+ * The {@link MultiDocValues.OrdinalMap} is required by {@link JoinUtil} to execute the join.\n+ */\n+ // TODO: Find a way to remove this query and let doToQuery(...) just return the query from JoinUtil.createJoinQuery(...)\n+ public static final class LateParsingQuery extends Query {\n \n private final Query toQuery;\n private final Query innerQuery;", "filename": "core/src/main/java/org/elasticsearch/index/query/HasChildQueryBuilder.java", "status": "modified" }, { "diff": "@@ -27,6 +27,7 @@\n import org.apache.lucene.spatial.geopoint.search.GeoPointInBBoxQuery;\n import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;\n import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;\n+import org.elasticsearch.index.query.HasChildQueryBuilder;\n \n import java.io.IOException;\n import java.util.Map;\n@@ -89,11 +90,16 @@ protected void extractUnknownQuery(Query query,\n }\n \n protected void extract(Query query, float boost, Map<String, WeightedSpanTerm> terms) throws IOException {\n- // skip all geo queries, see https://issues.apache.org/jira/browse/LUCENE-7293 and\n- // https://github.com/elastic/elasticsearch/issues/17537\n- if (query instanceof GeoPointInBBoxQuery == false) {\n- super.extract(query, boost, terms);\n+ if (query instanceof GeoPointInBBoxQuery) {\n+ // skip all geo queries, see https://issues.apache.org/jira/browse/LUCENE-7293 and\n+ // https://github.com/elastic/elasticsearch/issues/17537\n+ return;\n+ } else if (query instanceof HasChildQueryBuilder.LateParsingQuery) {\n+ // skip has_child or has_parent queries, see: https://github.com/elastic/elasticsearch/issues/14999\n+ return;\n }\n+\n+ super.extract(query, boost, terms);\n }\n }\n }", "filename": "core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java", "status": "modified" }, { "diff": "@@ -32,8 +32,12 @@\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.unit.TimeValue;\n import org.elasticsearch.index.IndexModule;\n+import org.elasticsearch.index.query.BoolQueryBuilder;\n import org.elasticsearch.index.query.HasChildQueryBuilder;\n+import 
org.elasticsearch.index.query.HasParentQueryBuilder;\n import org.elasticsearch.index.query.IdsQueryBuilder;\n+import org.elasticsearch.index.query.MatchAllQueryBuilder;\n+import org.elasticsearch.index.query.MatchQueryBuilder;\n import org.elasticsearch.index.query.QueryBuilder;\n import org.elasticsearch.index.query.QueryBuilders;\n import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;\n@@ -43,6 +47,8 @@\n import org.elasticsearch.search.aggregations.bucket.global.Global;\n import org.elasticsearch.search.aggregations.bucket.terms.Terms;\n import org.elasticsearch.search.builder.SearchSourceBuilder;\n+import org.elasticsearch.search.highlight.HighlightBuilder;\n+import org.elasticsearch.search.highlight.HighlightField;\n import org.elasticsearch.search.sort.SortBuilders;\n import org.elasticsearch.search.sort.SortOrder;\n import org.elasticsearch.test.ESIntegTestCase;\n@@ -1889,4 +1895,43 @@ public void testHasChildInnerQueryType() {\n QueryBuilders.hasChildQuery(\"child-type\", new IdsQueryBuilder().addIds(\"child-id\"), ScoreMode.None)).get();\n assertSearchHits(searchResponse, \"parent-id\");\n }\n+\n+ public void testHighlighersIgnoreParentChild() {\n+ assertAcked(prepareCreate(\"test\")\n+ .addMapping(\"parent-type\", \"searchText\", \"type=text,term_vector=with_positions_offsets,index_options=offsets\")\n+ .addMapping(\"child-type\", \"_parent\", \"type=parent-type\", \"searchText\",\n+ \"type=text,term_vector=with_positions_offsets,index_options=offsets\"));\n+ client().prepareIndex(\"test\", \"parent-type\", \"parent-id\").setSource(\"searchText\", \"quick brown fox\").get();\n+ client().prepareIndex(\"test\", \"child-type\", \"child-id\").setParent(\"parent-id\").setSource(\"searchText\", \"quick brown fox\").get();\n+ refresh();\n+\n+ String[] highlightTypes = new String[] {\"plain\", \"fvh\", \"postings\"};\n+ for (String highlightType : highlightTypes) {\n+ logger.info(\"Testing with highlight type [{}]\", highlightType);\n+ SearchResponse searchResponse = client().prepareSearch(\"test\")\n+ .setQuery(new BoolQueryBuilder()\n+ .must(new MatchQueryBuilder(\"searchText\", \"fox\"))\n+ .must(new HasChildQueryBuilder(\"child-type\", new MatchAllQueryBuilder(), ScoreMode.None))\n+ )\n+ .highlighter(new HighlightBuilder().field(new HighlightBuilder.Field(\"searchText\").highlighterType(highlightType)))\n+ .get();\n+ assertHitCount(searchResponse, 1);\n+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(\"parent-id\"));\n+ HighlightField highlightField = searchResponse.getHits().getAt(0).getHighlightFields().get(\"searchText\");\n+ assertThat(highlightField.getFragments()[0].string(), equalTo(\"quick brown <em>fox</em>\"));\n+\n+ searchResponse = client().prepareSearch(\"test\")\n+ .setQuery(new BoolQueryBuilder()\n+ .must(new MatchQueryBuilder(\"searchText\", \"fox\"))\n+ .must(new HasParentQueryBuilder(\"parent-type\", new MatchAllQueryBuilder(), false))\n+ )\n+ .highlighter(new HighlightBuilder().field(new HighlightBuilder.Field(\"searchText\").highlighterType(highlightType)))\n+ .get();\n+ assertHitCount(searchResponse, 1);\n+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(\"child-id\"));\n+ highlightField = searchResponse.getHits().getAt(0).getHighlightFields().get(\"searchText\");\n+ assertThat(highlightField.getFragments()[0].string(), equalTo(\"quick brown <em>fox</em>\"));\n+ }\n+ }\n+\n }", "filename": "core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java", "status": "modified" } ] }
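The guard above lives in an inner extractor class of `CustomQueryScorer`; below is a stand-alone sketch of the same idea, assuming the `WeightedSpanTermExtractor` base class and the now-public `LateParsingQuery` shown in the diff. The class name is hypothetical, and the real implementation also skips geo bounding-box queries:

```java
import java.io.IOException;
import java.util.Map;

import org.apache.lucene.search.Query;
import org.apache.lucene.search.highlight.WeightedSpanTerm;
import org.apache.lucene.search.highlight.WeightedSpanTermExtractor;
import org.elasticsearch.index.query.HasChildQueryBuilder;

// During term extraction the plain highlighter simply skips the join query instead of
// recursing into it: the documents on the other side of the join are not available at
// highlight time, so inner_hits is the supported way to highlight them.
class SkipJoinQueryExtractor extends WeightedSpanTermExtractor {
    @Override
    protected void extract(Query query, float boost, Map<String, WeightedSpanTerm> terms) throws IOException {
        if (query instanceof HasChildQueryBuilder.LateParsingQuery) {
            return; // has_child / has_parent contribute no highlight terms
        }
        super.extract(query, boost, terms);
    }
}
```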
{ "body": "Elasticsearch Rollover API tested on ES 5.0-alpha4\n\nTested as shown in documentation:\nhttps://www.elastic.co/guide/en/elasticsearch/reference/master/indices-rollover-index.html\n\n```\nPUT /roll_me_over-0001\n{\n \"aliases\": {\n \"roll_me_over\": {}\n }\n}\n\nPOST roll_me_over/_rollover \n{\n \"conditions\": {\n \"max_age\": \"1m\"\n }\n}\n```\n\nDocs state that naming will follow the same pattern:\n\n> If the name of the existing index ends with - and a number — e.g. logs-0001 — then the name of the new index will follow the same pattern, just incrementing the number (logs-0002).\n\nBut instead:\n\n```\nyellow open roll_me_over-0001 5 1 0 0 650b 650b\nyellow open roll_me_over-2 5 1 0 0 260b 260b\n```\n", "comments": [ { "body": "I think we should respect the zero padding as it makes for easier sorting, but then if we rollover to a number higher than allowed with the padding we should do what we do today (ie just increment)\n", "created_at": "2016-07-19T12:40:10Z" }, { "body": "do we have to auto-detect this? can't we ask the user to provide this with the request and have a default there?\n", "created_at": "2016-07-19T12:44:14Z" }, { "body": "I agree with @s1monw here, we can already supply a name for the new index vi `POST /roll_me_over-0001/_rollover/roll_me_over-0002` where `roll_me_over-0002` is the new index name. IMO, it would be simpler to just increment the old index name not respecting zero padding (current behaviour) and let the user provide the new index name if a simple increment is not enough?\nThough, the example in the docs is misleading here, I will adapt it to ensure the above is clear?\n@clintongormley WDYT?\n", "created_at": "2016-07-19T14:54:59Z" }, { "body": "another option is to just have a padding all the time lets say 6 digits and we are done? `String.format` should do this for us? \n", "created_at": "2016-07-19T19:24:26Z" }, { "body": "hmm, IMO having any padding at all seems arbitrary, is there any specific reason to add any padding? \n", "created_at": "2016-07-19T22:58:32Z" }, { "body": "@areek I think people often sort the index by name for recovery order etc. so that might be the most significant one\n", "created_at": "2016-07-20T07:16:53Z" }, { "body": "> another option is to just have a padding all the time lets say 6 digits and we are done?\n\n++\n", "created_at": "2016-07-21T10:38:14Z" } ], "number": 19484, "title": "Rollover API Numbering not honouring preceding zeroes" }
{ "body": "This commit adds a zero-padding length of 6 to the \nincrement in the auto-generated rollover index name \nto support sorting generated indices by index name.\n\ncloses #19484\n", "number": 19610, "review_comments": [], "title": "Add zero-padding to auto-generated rollover index name increment" }
{ "commits": [ { "message": "Add zero-padding to auto-generated rollover index name increment\n\ncloses #19484" } ], "files": [ { "diff": "@@ -47,6 +47,7 @@\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.TransportService;\n \n+import java.util.Locale;\n import java.util.Set;\n import java.util.regex.Pattern;\n import java.util.stream.Collectors;\n@@ -171,7 +172,7 @@ static String generateRolloverIndexName(String sourceIndexName) {\n int numberIndex = sourceIndexName.lastIndexOf(\"-\");\n assert numberIndex != -1 : \"no separator '-' found\";\n int counter = Integer.parseInt(sourceIndexName.substring(numberIndex + 1));\n- return String.join(\"-\", sourceIndexName.substring(0, numberIndex), String.valueOf(++counter));\n+ return String.join(\"-\", sourceIndexName.substring(0, numberIndex), String.format(Locale.ROOT, \"%06d\", ++counter));\n } else {\n throw new IllegalArgumentException(\"index name [\" + sourceIndexName + \"] does not match pattern '^.*-(\\\\d)+$'\");\n }", "filename": "core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java", "status": "modified" }, { "diff": "@@ -39,14 +39,14 @@ public void testRolloverOnEmptyIndex() throws Exception {\n assertAcked(prepareCreate(\"test_index-1\").addAlias(new Alias(\"test_alias\")).get());\n final RolloverResponse response = client().admin().indices().prepareRolloverIndex(\"test_alias\").get();\n assertThat(response.getOldIndex(), equalTo(\"test_index-1\"));\n- assertThat(response.getNewIndex(), equalTo(\"test_index-2\"));\n+ assertThat(response.getNewIndex(), equalTo(\"test_index-000002\"));\n assertThat(response.isDryRun(), equalTo(false));\n assertThat(response.isRolledOver(), equalTo(true));\n assertThat(response.getConditionStatus().size(), equalTo(0));\n final ClusterState state = client().admin().cluster().prepareState().get().getState();\n final IndexMetaData oldIndex = state.metaData().index(\"test_index-1\");\n assertFalse(oldIndex.getAliases().containsKey(\"test_alias\"));\n- final IndexMetaData newIndex = state.metaData().index(\"test_index-2\");\n+ final IndexMetaData newIndex = state.metaData().index(\"test_index-000002\");\n assertTrue(newIndex.getAliases().containsKey(\"test_alias\"));\n }\n \n@@ -56,14 +56,14 @@ public void testRollover() throws Exception {\n flush(\"test_index-2\");\n final RolloverResponse response = client().admin().indices().prepareRolloverIndex(\"test_alias\").get();\n assertThat(response.getOldIndex(), equalTo(\"test_index-2\"));\n- assertThat(response.getNewIndex(), equalTo(\"test_index-3\"));\n+ assertThat(response.getNewIndex(), equalTo(\"test_index-000003\"));\n assertThat(response.isDryRun(), equalTo(false));\n assertThat(response.isRolledOver(), equalTo(true));\n assertThat(response.getConditionStatus().size(), equalTo(0));\n final ClusterState state = client().admin().cluster().prepareState().get().getState();\n final IndexMetaData oldIndex = state.metaData().index(\"test_index-2\");\n assertFalse(oldIndex.getAliases().containsKey(\"test_alias\"));\n- final IndexMetaData newIndex = state.metaData().index(\"test_index-3\");\n+ final IndexMetaData newIndex = state.metaData().index(\"test_index-000003\");\n assertTrue(newIndex.getAliases().containsKey(\"test_alias\"));\n }\n \n@@ -78,14 +78,14 @@ public void testRolloverWithIndexSettings() throws Exception {\n final RolloverResponse response = client().admin().indices().prepareRolloverIndex(\"test_alias\")\n .settings(settings).alias(new 
Alias(\"extra_alias\")).get();\n assertThat(response.getOldIndex(), equalTo(\"test_index-2\"));\n- assertThat(response.getNewIndex(), equalTo(\"test_index-3\"));\n+ assertThat(response.getNewIndex(), equalTo(\"test_index-000003\"));\n assertThat(response.isDryRun(), equalTo(false));\n assertThat(response.isRolledOver(), equalTo(true));\n assertThat(response.getConditionStatus().size(), equalTo(0));\n final ClusterState state = client().admin().cluster().prepareState().get().getState();\n final IndexMetaData oldIndex = state.metaData().index(\"test_index-2\");\n assertFalse(oldIndex.getAliases().containsKey(\"test_alias\"));\n- final IndexMetaData newIndex = state.metaData().index(\"test_index-3\");\n+ final IndexMetaData newIndex = state.metaData().index(\"test_index-000003\");\n assertThat(newIndex.getNumberOfShards(), equalTo(1));\n assertThat(newIndex.getNumberOfReplicas(), equalTo(0));\n assertTrue(newIndex.getAliases().containsKey(\"test_alias\"));\n@@ -98,14 +98,14 @@ public void testRolloverDryRun() throws Exception {\n flush(\"test_index-1\");\n final RolloverResponse response = client().admin().indices().prepareRolloverIndex(\"test_alias\").dryRun(true).get();\n assertThat(response.getOldIndex(), equalTo(\"test_index-1\"));\n- assertThat(response.getNewIndex(), equalTo(\"test_index-2\"));\n+ assertThat(response.getNewIndex(), equalTo(\"test_index-000002\"));\n assertThat(response.isDryRun(), equalTo(true));\n assertThat(response.isRolledOver(), equalTo(false));\n assertThat(response.getConditionStatus().size(), equalTo(0));\n final ClusterState state = client().admin().cluster().prepareState().get().getState();\n final IndexMetaData oldIndex = state.metaData().index(\"test_index-1\");\n assertTrue(oldIndex.getAliases().containsKey(\"test_alias\"));\n- final IndexMetaData newIndex = state.metaData().index(\"test_index-2\");\n+ final IndexMetaData newIndex = state.metaData().index(\"test_index-000002\");\n assertNull(newIndex);\n }\n \n@@ -126,7 +126,7 @@ public void testRolloverConditionsNotMet() throws Exception {\n final ClusterState state = client().admin().cluster().prepareState().get().getState();\n final IndexMetaData oldIndex = state.metaData().index(\"test_index-0\");\n assertTrue(oldIndex.getAliases().containsKey(\"test_alias\"));\n- final IndexMetaData newIndex = state.metaData().index(\"test_index-1\");\n+ final IndexMetaData newIndex = state.metaData().index(\"test_index-000001\");\n assertNull(newIndex);\n }\n \n@@ -151,14 +151,14 @@ public void testRolloverWithNewIndexName() throws Exception {\n public void testRolloverOnExistingIndex() throws Exception {\n assertAcked(prepareCreate(\"test_index-0\").addAlias(new Alias(\"test_alias\")).get());\n index(\"test_index-0\", \"type1\", \"1\", \"field\", \"value\");\n- assertAcked(prepareCreate(\"test_index-1\").get());\n- index(\"test_index-1\", \"type1\", \"1\", \"field\", \"value\");\n- flush(\"test_index-0\", \"test_index-1\");\n+ assertAcked(prepareCreate(\"test_index-000001\").get());\n+ index(\"test_index-000001\", \"type1\", \"1\", \"field\", \"value\");\n+ flush(\"test_index-0\", \"test_index-000001\");\n try {\n client().admin().indices().prepareRolloverIndex(\"test_alias\").get();\n fail(\"expected failure due to existing rollover index\");\n } catch (IndexAlreadyExistsException e) {\n- assertThat(e.getIndex().getName(), equalTo(\"test_index-1\"));\n+ assertThat(e.getIndex().getName(), equalTo(\"test_index-000001\"));\n }\n }\n }", "filename": 
"core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java", "status": "modified" }, { "diff": "@@ -35,6 +35,7 @@\n import org.elasticsearch.test.ESTestCase;\n \n import java.util.HashSet;\n+import java.util.Locale;\n import java.util.Set;\n \n import static org.elasticsearch.action.admin.indices.rollover.TransportRolloverAction.evaluateConditions;\n@@ -158,9 +159,9 @@ public void testGenerateRolloverIndexName() throws Exception {\n final String indexPrefix = randomAsciiOfLength(10);\n String indexEndingInNumbers = indexPrefix + \"-\" + num;\n assertThat(TransportRolloverAction.generateRolloverIndexName(indexEndingInNumbers),\n- equalTo(indexPrefix + \"-\" + (num + 1)));\n- assertThat(TransportRolloverAction.generateRolloverIndexName(\"index-name-1\"), equalTo(\"index-name-2\"));\n- assertThat(TransportRolloverAction.generateRolloverIndexName(\"index-name-2\"), equalTo(\"index-name-3\"));\n+ equalTo(indexPrefix + \"-\" + String.format(Locale.ROOT, \"%06d\", num + 1)));\n+ assertThat(TransportRolloverAction.generateRolloverIndexName(\"index-name-1\"), equalTo(\"index-name-000002\"));\n+ assertThat(TransportRolloverAction.generateRolloverIndexName(\"index-name-2\"), equalTo(\"index-name-000003\"));\n }\n \n public void testCreateIndexRequest() throws Exception {", "filename": "core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java", "status": "modified" }, { "diff": "@@ -12,7 +12,7 @@ the new alias.\n \n [source,js]\n --------------------------------------------------\n-PUT /logs-0001 <1>\n+PUT /logs-000001 <1>\n {\n \"aliases\": {\n \"logs_write\": {}\n@@ -28,18 +28,18 @@ POST logs_write/_rollover <2>\n }\n --------------------------------------------------\n // CONSOLE\n-<1> Creates an index called `logs-0001` with the alias `logs_write`.\n+<1> Creates an index called `logs-0000001` with the alias `logs_write`.\n <2> If the index pointed to by `logs_write` was created 7 or more days ago, or\n contains 1,000 or more documents, then the `logs-0002` index is created\n- and the `logs_write` alias is updated to point to `logs-0002`.\n+ and the `logs_write` alias is updated to point to `logs-000002`.\n \n The above request might return the following response:\n \n [source,js]\n --------------------------------------------------\n {\n- \"old_index\": \"logs-0001\",\n- \"new_index\": \"logs-0002\",\n+ \"old_index\": \"logs-000001\",\n+ \"new_index\": \"logs-000002\",\n \"rolled_over\": true, <1>\n \"dry_run\": false, <2>\n \"conditions\": { <3>\n@@ -56,8 +56,9 @@ The above request might return the following response:\n === Naming the new index\n \n If the name of the existing index ends with `-` and a number -- e.g.\n-`logs-0001` -- then the name of the new index will follow the same pattern,\n-just incrementing the number (`logs-0002`).\n+`logs-000001` -- then the name of the new index will follow the same pattern,\n+incrementing the number (`logs-000002`). The number is zero-padded with a length\n+of 6, regardless of the old index name.\n \n If the old name doesn't match this pattern then you must specify the name for\n the new index as follows:\n@@ -80,7 +81,7 @@ override any values set in matching index templates. 
For example, the following\n \n [source,js]\n --------------------------------------------------\n-PUT /logs-0001\n+PUT /logs-000001\n {\n \"aliases\": {\n \"logs_write\": {}\n@@ -108,7 +109,7 @@ checked without performing the actual rollover:\n \n [source,js]\n --------------------------------------------------\n-PUT /logs-0001\n+PUT /logs-000001\n {\n \"aliases\": {\n \"logs_write\": {}", "filename": "docs/reference/indices/rollover-index.asciidoc", "status": "modified" }, { "diff": "@@ -39,22 +39,22 @@\n max_docs: 1\n \n - match: { old_index: logs-1 }\n- - match: { new_index: logs-2 }\n+ - match: { new_index: logs-000002 }\n - match: { rolled_over: true }\n - match: { dry_run: false }\n - match: { conditions: { \"[max_docs: 1]\": true } }\n \n # ensure new index is created\n - do:\n indices.exists:\n- index: logs-2\n+ index: logs-000002\n \n - is_true: ''\n \n # index into new index\n - do:\n index:\n- index: logs-2\n+ index: logs-000002\n type: test\n id: \"2\"\n body: { \"foo\": \"hello world\" }\n@@ -69,5 +69,5 @@\n type: test\n \n - match: { hits.total: 1 }\n- - match: { hits.hits.0._index: \"logs-2\"}\n+ - match: { hits.hits.0._index: \"logs-000002\"}\n ", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yaml", "status": "modified" } ] }
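The padded name generation boils down to the `String.format(Locale.ROOT, "%06d", ...)` call shown in `TransportRolloverAction`. A simplified sketch, with the `'^.*-(\d)+$'` pattern check and error handling of the real method left out:

```java
import java.util.Locale;

// Strip the trailing "-<number>", increment it, and zero-pad to six digits so that
// generated indices sort lexicographically in creation order.
class RolloverNameSketch {
    static String generateRolloverIndexName(String sourceIndexName) {
        int numberIndex = sourceIndexName.lastIndexOf("-");
        int counter = Integer.parseInt(sourceIndexName.substring(numberIndex + 1));
        return sourceIndexName.substring(0, numberIndex) + "-" + String.format(Locale.ROOT, "%06d", counter + 1);
    }
    // e.g. generateRolloverIndexName("logs-1")      -> "logs-000002"
    //      generateRolloverIndexName("logs-000001") -> "logs-000002"
}
```

Note that the padding is applied regardless of how the old counter was written, which is why the tests above expect `test_index-2` to roll over to `test_index-000003`.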
{ "body": "Spoke with @martijnvg and @BigFunger about some strange behavior around mutated state within sub-processors within ForEach. Realization was that we expose top-level document fields, but silently ignore mutations to them. This is a really confusing behavior.\n\nCurrently, the `foreach` processor makes it possible to read all other fields in the ingest document besides `_value`. It does not enable mutability of these fields. \n\nThis test passes that attempts to append to a top-level field passes. The append does not actually do anything since it operates on a copy. This should be fixed so that the original documents reflects modifications outside of `_value`.\n\n```\n public void testExecute() throws Exception {\n List<Object> values = new ArrayList<>();\n values.add(\"string\");\n values.add(1);\n values.add(null);\n IngestDocument ingestDocument = new IngestDocument(\n \"_index\", \"_type\", \"_id\", null, null, null, null, Collections.singletonMap(\"values\", values)\n );\n\n TemplateService ts = TestTemplateService.instance();\n\n ForEachProcessor processor = new ForEachProcessor(\n \"_tag\", \"values\", new CompoundProcessor(false,\n Collections.singletonList(new UppercaseProcessor(\"_tag_upper\", \"_value\")),\n Collections.singletonList(new AppendProcessor(\"_tag\", ts.compile(\"errors\"), (model) -> (Collections.singletonList(\"added\"))))\n ));\n processor.execute(ingestDocument);\n\n List<String> result = ingestDocument.getFieldValue(\"values\", List.class);\n assertThat(result.get(0), equalTo(\"STRING\"));\n assertThat(result.get(1), equalTo(1));\n assertThat(result.get(2), equalTo(null));\n }\n```\n", "comments": [], "number": 19592, "title": "Add mutability of rest of document in Ingest-ForeachProcessor sub processors" }
{ "body": "Before the `foreach` processor copied the ingest document and inserted the array element being processed under the `_value` key in the source. This caused two issues:\n- Modifications made to other fields were silently ignored.\n- If the array element contained a field with the name `_value` it would not be accessible.\n\nThis PR changes `foreach` processor to now copy the ingest document, but keep using the same ingest document and storing the array element temporarily in the ingest metadata (`_ingest._value` key) instead of the source itself. \n\nPR for #19592\n", "number": 19609, "review_comments": [ { "body": "do we want to emphasis the fact that `_ingest._value` is overwritten? so if you had a previous metadata field named `_value` in metadata, it will be lost.\n", "created_at": "2016-07-26T19:14:04Z" }, { "body": "Done: https://github.com/elastic/elasticsearch/pull/19609/files#diff-728004ac86ba5f8f15e87b8e88da132eR869\n", "created_at": "2016-07-27T07:36:46Z" } ], "title": "Change foreach processor to use ingest metadata for array element" }
{ "commits": [ { "message": "ingest: Change the `foreach` processor to use the `_ingest._value` ingest metadata attribute to store the current array element being processed.\n\nCloses #19592" } ], "files": [ { "diff": "@@ -41,15 +41,14 @@ final class WriteableIngestDocument implements Writeable, ToXContent {\n \n WriteableIngestDocument(StreamInput in) throws IOException {\n Map<String, Object> sourceAndMetadata = in.readMap();\n- @SuppressWarnings(\"unchecked\")\n- Map<String, String> ingestMetadata = (Map<String, String>) in.readGenericValue();\n+ Map<String, Object> ingestMetadata = in.readMap();\n this.ingestDocument = new IngestDocument(sourceAndMetadata, ingestMetadata);\n }\n \n @Override\n public void writeTo(StreamOutput out) throws IOException {\n out.writeMap(ingestDocument.getSourceAndMetadata());\n- out.writeGenericValue(ingestDocument.getIngestMetadata());\n+ out.writeMap(ingestDocument.getIngestMetadata());\n }\n \n IngestDocument getIngestDocument() {\n@@ -66,11 +65,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n }\n }\n builder.field(\"_source\", ingestDocument.getSourceAndMetadata());\n- builder.startObject(\"_ingest\");\n- for (Map.Entry<String, String> ingestMetadata : ingestDocument.getIngestMetadata().entrySet()) {\n- builder.field(ingestMetadata.getKey(), ingestMetadata.getValue());\n- }\n- builder.endObject();\n+ builder.field(\"_ingest\", ingestDocument.getIngestMetadata());\n builder.endObject();\n return builder;\n }", "filename": "core/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java", "status": "modified" }, { "diff": "@@ -135,14 +135,14 @@ private void putFailureMetadata(IngestDocument ingestDocument, ElasticsearchExce\n List<String> processorTagHeader = cause.getHeader(\"processor_tag\");\n String failedProcessorType = (processorTypeHeader != null) ? processorTypeHeader.get(0) : null;\n String failedProcessorTag = (processorTagHeader != null) ? processorTagHeader.get(0) : null;\n- Map<String, String> ingestMetadata = ingestDocument.getIngestMetadata();\n+ Map<String, Object> ingestMetadata = ingestDocument.getIngestMetadata();\n ingestMetadata.put(ON_FAILURE_MESSAGE_FIELD, cause.getRootCause().getMessage());\n ingestMetadata.put(ON_FAILURE_PROCESSOR_TYPE_FIELD, failedProcessorType);\n ingestMetadata.put(ON_FAILURE_PROCESSOR_TAG_FIELD, failedProcessorTag);\n }\n \n private void removeFailureMetadata(IngestDocument ingestDocument) {\n- Map<String, String> ingestMetadata = ingestDocument.getIngestMetadata();\n+ Map<String, Object> ingestMetadata = ingestDocument.getIngestMetadata();\n ingestMetadata.remove(ON_FAILURE_MESSAGE_FIELD);\n ingestMetadata.remove(ON_FAILURE_PROCESSOR_TYPE_FIELD);\n ingestMetadata.remove(ON_FAILURE_PROCESSOR_TAG_FIELD);", "filename": "core/src/main/java/org/elasticsearch/ingest/CompoundProcessor.java", "status": "modified" }, { "diff": "@@ -54,7 +54,7 @@ public final class IngestDocument {\n static final String TIMESTAMP = \"timestamp\";\n \n private final Map<String, Object> sourceAndMetadata;\n- private final Map<String, String> ingestMetadata;\n+ private final Map<String, Object> ingestMetadata;\n \n public IngestDocument(String index, String type, String id, String routing, String parent, String timestamp,\n String ttl, Map<String, Object> source) {\n@@ -94,7 +94,7 @@ public IngestDocument(IngestDocument other) {\n * source and ingest metadata. 
This is needed because the ingest metadata will be initialized with the current timestamp at\n * init time, which makes equality comparisons impossible in tests.\n */\n- public IngestDocument(Map<String, Object> sourceAndMetadata, Map<String, String> ingestMetadata) {\n+ public IngestDocument(Map<String, Object> sourceAndMetadata, Map<String, Object> ingestMetadata) {\n this.sourceAndMetadata = sourceAndMetadata;\n this.ingestMetadata = ingestMetadata;\n }\n@@ -517,7 +517,7 @@ public Map<MetaData, String> extractMetadata() {\n * Returns the available ingest metadata fields, by default only timestamp, but it is possible to set additional ones.\n * Use only for reading values, modify them instead using {@link #setFieldValue(String, Object)} and {@link #removeField(String)}\n */\n- public Map<String, String> getIngestMetadata() {\n+ public Map<String, Object> getIngestMetadata() {\n return this.ingestMetadata;\n }\n ", "filename": "core/src/main/java/org/elasticsearch/ingest/IngestDocument.java", "status": "modified" }, { "diff": "@@ -145,7 +145,7 @@ public void testExecuteVerboseItemWithOnFailure() throws Exception {\n assertThat(simulateDocumentVerboseResult.getProcessorResults().get(1).getIngestDocument(), not(sameInstance(ingestDocument)));\n \n IngestDocument ingestDocumentWithOnFailureMetadata = new IngestDocument(ingestDocument);\n- Map<String, String> metadata = ingestDocumentWithOnFailureMetadata.getIngestMetadata();\n+ Map<String, Object> metadata = ingestDocumentWithOnFailureMetadata.getIngestMetadata();\n metadata.put(CompoundProcessor.ON_FAILURE_PROCESSOR_TYPE_FIELD, \"mock\");\n metadata.put(CompoundProcessor.ON_FAILURE_PROCESSOR_TAG_FIELD, \"processor_0\");\n metadata.put(CompoundProcessor.ON_FAILURE_MESSAGE_FIELD, \"processor failed\");", "filename": "core/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java", "status": "modified" }, { "diff": "@@ -111,7 +111,7 @@ public void testActualCompoundProcessorWithOnFailure() throws Exception {\n assertThat(resultList.get(0).getFailure(), equalTo(exception));\n assertThat(resultList.get(0).getProcessorTag(), equalTo(expectedFailResult.getProcessorTag()));\n \n- Map<String, String> metadata = resultList.get(1).getIngestDocument().getIngestMetadata();\n+ Map<String, Object> metadata = resultList.get(1).getIngestDocument().getIngestMetadata();\n assertThat(metadata.get(ON_FAILURE_MESSAGE_FIELD), equalTo(\"fail\"));\n assertThat(metadata.get(ON_FAILURE_PROCESSOR_TYPE_FIELD), equalTo(\"test\"));\n assertThat(metadata.get(ON_FAILURE_PROCESSOR_TAG_FIELD), equalTo(\"fail\"));", "filename": "core/src/test/java/org/elasticsearch/action/ingest/TrackingResultProcessorTests.java", "status": "modified" }, { "diff": "@@ -47,7 +47,7 @@ public void testEqualsAndHashcode() throws Exception {\n for (int i = 0; i < numFields; i++) {\n sourceAndMetadata.put(randomFrom(IngestDocument.MetaData.values()).getFieldName(), randomAsciiOfLengthBetween(5, 10));\n }\n- Map<String, String> ingestMetadata = new HashMap<>();\n+ Map<String, Object> ingestMetadata = new HashMap<>();\n numFields = randomIntBetween(1, 5);\n for (int i = 0; i < numFields; i++) {\n ingestMetadata.put(randomAsciiOfLengthBetween(5, 10), randomAsciiOfLengthBetween(5, 10));\n@@ -70,7 +70,7 @@ public void testEqualsAndHashcode() throws Exception {\n changed = true;\n }\n \n- Map<String, String> otherIngestMetadata;\n+ Map<String, Object> otherIngestMetadata;\n if (randomBoolean()) {\n otherIngestMetadata = new HashMap<>();\n numFields = randomIntBetween(1, 5);\n@@ 
-103,7 +103,7 @@ public void testSerialization() throws IOException {\n for (int i = 0; i < numFields; i++) {\n sourceAndMetadata.put(randomFrom(IngestDocument.MetaData.values()).getFieldName(), randomAsciiOfLengthBetween(5, 10));\n }\n- Map<String, String> ingestMetadata = new HashMap<>();\n+ Map<String, Object> ingestMetadata = new HashMap<>();\n numFields = randomIntBetween(1, 5);\n for (int i = 0; i < numFields; i++) {\n ingestMetadata.put(randomAsciiOfLengthBetween(5, 10), randomAsciiOfLengthBetween(5, 10));\n@@ -131,7 +131,7 @@ public void testToXContent() throws IOException {\n \n Map<String, Object> toXContentDoc = (Map<String, Object>) toXContentMap.get(\"doc\");\n Map<String, Object> toXContentSource = (Map<String, Object>) toXContentDoc.get(\"_source\");\n- Map<String, String> toXContentIngestMetadata = (Map<String, String>) toXContentDoc.get(\"_ingest\");\n+ Map<String, Object> toXContentIngestMetadata = (Map<String, Object>) toXContentDoc.get(\"_ingest\");\n \n Map<IngestDocument.MetaData, String> metadataMap = ingestDocument.extractMetadata();\n for (Map.Entry<IngestDocument.MetaData, String> metadata : metadataMap.entrySet()) {", "filename": "core/src/test/java/org/elasticsearch/action/ingest/WriteableIngestDocumentTests.java", "status": "modified" }, { "diff": "@@ -86,7 +86,7 @@ public void testIgnoreFailure() throws Exception {\n public void testSingleProcessorWithOnFailureProcessor() throws Exception {\n TestProcessor processor1 = new TestProcessor(\"id\", \"first\", ingestDocument -> {throw new RuntimeException(\"error\");});\n TestProcessor processor2 = new TestProcessor(ingestDocument -> {\n- Map<String, String> ingestMetadata = ingestDocument.getIngestMetadata();\n+ Map<String, Object> ingestMetadata = ingestDocument.getIngestMetadata();\n assertThat(ingestMetadata.size(), equalTo(3));\n assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_MESSAGE_FIELD), equalTo(\"error\"));\n assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TYPE_FIELD), equalTo(\"first\"));\n@@ -104,15 +104,15 @@ public void testSingleProcessorWithOnFailureProcessor() throws Exception {\n public void testSingleProcessorWithNestedFailures() throws Exception {\n TestProcessor processor = new TestProcessor(\"id\", \"first\", ingestDocument -> {throw new RuntimeException(\"error\");});\n TestProcessor processorToFail = new TestProcessor(\"id2\", \"second\", ingestDocument -> {\n- Map<String, String> ingestMetadata = ingestDocument.getIngestMetadata();\n+ Map<String, Object> ingestMetadata = ingestDocument.getIngestMetadata();\n assertThat(ingestMetadata.size(), equalTo(3));\n assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_MESSAGE_FIELD), equalTo(\"error\"));\n assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TYPE_FIELD), equalTo(\"first\"));\n assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TAG_FIELD), equalTo(\"id\"));\n throw new RuntimeException(\"error\");\n });\n TestProcessor lastProcessor = new TestProcessor(ingestDocument -> {\n- Map<String, String> ingestMetadata = ingestDocument.getIngestMetadata();\n+ Map<String, Object> ingestMetadata = ingestDocument.getIngestMetadata();\n assertThat(ingestMetadata.size(), equalTo(3));\n assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_MESSAGE_FIELD), equalTo(\"error\"));\n assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TYPE_FIELD), equalTo(\"second\"));\n@@ -131,7 +131,7 @@ public void testSingleProcessorWithNestedFailures() throws Exception {\n 
public void testCompoundProcessorExceptionFailWithoutOnFailure() throws Exception {\n TestProcessor firstProcessor = new TestProcessor(\"id1\", \"first\", ingestDocument -> {throw new RuntimeException(\"error\");});\n TestProcessor secondProcessor = new TestProcessor(\"id3\", \"second\", ingestDocument -> {\n- Map<String, String> ingestMetadata = ingestDocument.getIngestMetadata();\n+ Map<String, Object> ingestMetadata = ingestDocument.getIngestMetadata();\n assertThat(ingestMetadata.entrySet(), hasSize(3));\n assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_MESSAGE_FIELD), equalTo(\"error\"));\n assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TYPE_FIELD), equalTo(\"first\"));\n@@ -153,7 +153,7 @@ public void testCompoundProcessorExceptionFail() throws Exception {\n TestProcessor failProcessor =\n new TestProcessor(\"tag_fail\", \"fail\", ingestDocument -> {throw new RuntimeException(\"custom error message\");});\n TestProcessor secondProcessor = new TestProcessor(\"id3\", \"second\", ingestDocument -> {\n- Map<String, String> ingestMetadata = ingestDocument.getIngestMetadata();\n+ Map<String, Object> ingestMetadata = ingestDocument.getIngestMetadata();\n assertThat(ingestMetadata.entrySet(), hasSize(3));\n assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_MESSAGE_FIELD), equalTo(\"custom error message\"));\n assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TYPE_FIELD), equalTo(\"fail\"));\n@@ -176,7 +176,7 @@ public void testCompoundProcessorExceptionFailInOnFailure() throws Exception {\n TestProcessor failProcessor =\n new TestProcessor(\"tag_fail\", \"fail\", ingestDocument -> {throw new RuntimeException(\"custom error message\");});\n TestProcessor secondProcessor = new TestProcessor(\"id3\", \"second\", ingestDocument -> {\n- Map<String, String> ingestMetadata = ingestDocument.getIngestMetadata();\n+ Map<String, Object> ingestMetadata = ingestDocument.getIngestMetadata();\n assertThat(ingestMetadata.entrySet(), hasSize(3));\n assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_MESSAGE_FIELD), equalTo(\"custom error message\"));\n assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TYPE_FIELD), equalTo(\"fail\"));", "filename": "core/src/test/java/org/elasticsearch/ingest/CompoundProcessorTests.java", "status": "modified" }, { "diff": "@@ -907,7 +907,7 @@ public void testEqualsAndHashcode() throws Exception {\n for (int i = 0; i < numFields; i++) {\n sourceAndMetadata.put(randomFrom(IngestDocument.MetaData.values()).getFieldName(), randomAsciiOfLengthBetween(5, 10));\n }\n- Map<String, String> ingestMetadata = new HashMap<>();\n+ Map<String, Object> ingestMetadata = new HashMap<>();\n numFields = randomIntBetween(1, 5);\n for (int i = 0; i < numFields; i++) {\n ingestMetadata.put(randomAsciiOfLengthBetween(5, 10), randomAsciiOfLengthBetween(5, 10));\n@@ -930,7 +930,7 @@ public void testEqualsAndHashcode() throws Exception {\n changed = true;\n }\n \n- Map<String, String> otherIngestMetadata;\n+ Map<String, Object> otherIngestMetadata;\n if (randomBoolean()) {\n otherIngestMetadata = new HashMap<>();\n numFields = randomIntBetween(1, 5);\n@@ -962,7 +962,7 @@ public void testIngestMetadataTimestamp() throws Exception {\n long before = System.currentTimeMillis();\n IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());\n long after = System.currentTimeMillis();\n- String timestampString = ingestDocument.getIngestMetadata().get(\"timestamp\");\n+ String timestampString = 
(String) ingestDocument.getIngestMetadata().get(\"timestamp\");\n assertThat(timestampString, notNullValue());\n assertThat(timestampString, endsWith(\"+0000\"));\n DateFormat df = new SimpleDateFormat(\"yyyy-MM-dd'T'HH:mm:ss.SSSZZ\", Locale.ROOT);", "filename": "core/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java", "status": "modified" }, { "diff": "@@ -859,8 +859,16 @@ because it is likely that the number of elements in an array is unknown. For thi\n processor exists. By specifying the field holding array elements and a processor that\n defines what should happen to each element, array fields can easily be preprocessed.\n \n-A processor inside the foreach processor works in a different context, and the only valid top-level\n-field is `_value`, which holds the array element value. Under this field other fields may exist.\n+A processor inside the foreach processor works in the array element context and puts that in the ingest metadata\n+under the `_ingest._value` key. If the array element is a json object it holds all immediate fields of that json object.\n+and if the nested object is a value is `_ingest._value` just holds that value. Note that if a processor prior to the\n+`foreach` processor used `_ingest._value` key then the specified value will not be available to the processor inside\n+the `foreach` processor. The `foreach` processor does restore the original value, so that value is available to processors\n+after the `foreach` processor.\n+\n+Note that any other field from the document are accessible and modifiable like with all other processors. This processor\n+just puts the current array element being read into `_ingest._value` ingest metadata attribute, so that it may be\n+pre-processed.\n \n If the `foreach` processor fails to process an element inside the array, and no `on_failure` processor has been specified,\n then it aborts the execution and leaves the array unmodified.\n@@ -892,7 +900,7 @@ When this `foreach` processor operates on this sample document:\n \"field\" : \"values\",\n \"processor\" : {\n \"uppercase\" : {\n- \"field\" : \"_value\"\n+ \"field\" : \"_ingest._value\"\n }\n }\n }\n@@ -936,7 +944,7 @@ so the following `foreach` processor is used:\n \"field\" : \"persons\",\n \"processor\" : {\n \"remove\" : {\n- \"field\" : \"_value.id\"\n+ \"field\" : \"_ingest._value.id\"\n }\n }\n }\n@@ -959,9 +967,7 @@ After preprocessing the result is:\n }\n --------------------------------------------------\n \n-As for any processor, you can define `on_failure` processors\n-in processors that are wrapped inside the `foreach` processor.\n-\n+The wrapped processor can have a `on_failure` definition.\n For example, the `id` field may not exist on all person objects.\n Instead of failing the index request, you can use an `on_failure`\n block to send the document to the 'failure_index' index for later inspection:", "filename": "docs/reference/ingest/ingest-node.asciidoc", "status": "modified" }, { "diff": "@@ -62,11 +62,12 @@ public void execute(IngestDocument ingestDocument) throws Exception {\n List<Object> values = ingestDocument.getFieldValue(field, List.class);\n List<Object> newValues = new ArrayList<>(values.size());\n for (Object value : values) {\n- Map<String, Object> innerSource = new HashMap<>(ingestDocument.getSourceAndMetadata());\n- innerSource.put(\"_value\", value); // scalar value to access the list item being evaluated\n- IngestDocument innerIngestDocument = new IngestDocument(innerSource, ingestDocument.getIngestMetadata());\n- 
processor.execute(innerIngestDocument);\n- newValues.add(innerSource.get(\"_value\"));\n+ Object previousValue = ingestDocument.getIngestMetadata().put(\"_value\", value);\n+ try {\n+ processor.execute(ingestDocument);\n+ } finally {\n+ newValues.add(ingestDocument.getIngestMetadata().put(\"_value\", previousValue));\n+ }\n }\n ingestDocument.setFieldValue(field, newValues);\n }", "filename": "modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java", "status": "modified" }, { "diff": "@@ -49,7 +49,7 @@ public void testExecute() throws Exception {\n );\n \n ForEachProcessor processor = new ForEachProcessor(\n- \"_tag\", \"values\", new UppercaseProcessor(\"_tag\", \"_value\")\n+ \"_tag\", \"values\", new UppercaseProcessor(\"_tag\", \"_ingest._value\")\n );\n processor.execute(ingestDocument);\n \n@@ -65,7 +65,7 @@ public void testExecuteWithFailure() throws Exception {\n );\n \n TestProcessor testProcessor = new TestProcessor(id -> {\n- if (\"c\".equals(id.getFieldValue(\"_value\", String.class))) {\n+ if (\"c\".equals(id.getFieldValue(\"_ingest._value\", String.class))) {\n throw new RuntimeException(\"failure\");\n }\n });\n@@ -80,11 +80,11 @@ public void testExecuteWithFailure() throws Exception {\n assertThat(ingestDocument.getFieldValue(\"values\", List.class), equalTo(Arrays.asList(\"a\", \"b\", \"c\")));\n \n testProcessor = new TestProcessor(id -> {\n- String value = id.getFieldValue(\"_value\", String.class);\n+ String value = id.getFieldValue(\"_ingest._value\", String.class);\n if (\"c\".equals(value)) {\n throw new RuntimeException(\"failure\");\n } else {\n- id.setFieldValue(\"_value\", value.toUpperCase(Locale.ROOT));\n+ id.setFieldValue(\"_ingest._value\", value.toUpperCase(Locale.ROOT));\n }\n });\n Processor onFailureProcessor = new TestProcessor(ingestDocument1 -> {});\n@@ -105,9 +105,9 @@ public void testMetaDataAvailable() throws Exception {\n );\n \n TestProcessor innerProcessor = new TestProcessor(id -> {\n- id.setFieldValue(\"_value.index\", id.getSourceAndMetadata().get(\"_index\"));\n- id.setFieldValue(\"_value.type\", id.getSourceAndMetadata().get(\"_type\"));\n- id.setFieldValue(\"_value.id\", id.getSourceAndMetadata().get(\"_id\"));\n+ id.setFieldValue(\"_ingest._value.index\", id.getSourceAndMetadata().get(\"_index\"));\n+ id.setFieldValue(\"_ingest._value.type\", id.getSourceAndMetadata().get(\"_type\"));\n+ id.setFieldValue(\"_ingest._value.id\", id.getSourceAndMetadata().get(\"_id\"));\n });\n ForEachProcessor processor = new ForEachProcessor(\"_tag\", \"values\", innerProcessor);\n processor.execute(ingestDocument);\n@@ -136,7 +136,7 @@ public void testRestOfTheDocumentIsAvailable() throws Exception {\n \n TemplateService ts = TestTemplateService.instance();\n ForEachProcessor processor = new ForEachProcessor(\n- \"_tag\", \"values\", new SetProcessor(\"_tag\", ts.compile(\"_value.new_field\"), (model) -> model.get(\"other\"))\n+ \"_tag\", \"values\", new SetProcessor(\"_tag\", ts.compile(\"_ingest._value.new_field\"), (model) -> model.get(\"other\"))\n );\n processor.execute(ingestDocument);\n \n@@ -151,8 +151,8 @@ public void testRandom() throws Exception {\n Processor innerProcessor = new Processor() {\n @Override\n public void execute(IngestDocument ingestDocument) throws Exception {\n- String existingValue = ingestDocument.getFieldValue(\"_value\", String.class);\n- ingestDocument.setFieldValue(\"_value\", existingValue + \".\");\n+ String existingValue = ingestDocument.getFieldValue(\"_ingest._value\", 
String.class);\n+ ingestDocument.setFieldValue(\"_ingest._value\", existingValue + \".\");\n }\n \n @Override\n@@ -184,4 +184,91 @@ public String getTag() {\n }\n }\n \n+ public void testModifyFieldsOutsideArray() throws Exception {\n+ List<Object> values = new ArrayList<>();\n+ values.add(\"string\");\n+ values.add(1);\n+ values.add(null);\n+ IngestDocument ingestDocument = new IngestDocument(\n+ \"_index\", \"_type\", \"_id\", null, null, null, null, Collections.singletonMap(\"values\", values)\n+ );\n+\n+ TemplateService ts = TestTemplateService.instance();\n+\n+ ForEachProcessor processor = new ForEachProcessor(\n+ \"_tag\", \"values\", new CompoundProcessor(false,\n+ Collections.singletonList(new UppercaseProcessor(\"_tag_upper\", \"_ingest._value\")),\n+ Collections.singletonList(new AppendProcessor(\"_tag\",\n+ ts.compile(\"errors\"), (model) -> (Collections.singletonList(\"added\"))))\n+ ));\n+ processor.execute(ingestDocument);\n+\n+ List<String> result = ingestDocument.getFieldValue(\"values\", List.class);\n+ assertThat(result.get(0), equalTo(\"STRING\"));\n+ assertThat(result.get(1), equalTo(1));\n+ assertThat(result.get(2), equalTo(null));\n+\n+ List<String> errors = ingestDocument.getFieldValue(\"errors\", List.class);\n+ assertThat(errors.size(), equalTo(2));\n+ }\n+\n+ public void testScalarValueAllowsUnderscoreValueFieldToRemainAccessible() throws Exception {\n+ List<Object> values = new ArrayList<>();\n+ values.add(\"please\");\n+ values.add(\"change\");\n+ values.add(\"me\");\n+ Map<String, Object> source = new HashMap<>();\n+ source.put(\"_value\", \"new_value\");\n+ source.put(\"values\", values);\n+ IngestDocument ingestDocument = new IngestDocument(\n+ \"_index\", \"_type\", \"_id\", null, null, null, null, source\n+ );\n+\n+ TestProcessor processor = new TestProcessor(doc -> doc.setFieldValue(\"_ingest._value\",\n+ doc.getFieldValue(\"_source._value\", String.class)));\n+ ForEachProcessor forEachProcessor = new ForEachProcessor(\"_tag\", \"values\", processor);\n+ forEachProcessor.execute(ingestDocument);\n+\n+ List<String> result = ingestDocument.getFieldValue(\"values\", List.class);\n+ assertThat(result.get(0), equalTo(\"new_value\"));\n+ assertThat(result.get(1), equalTo(\"new_value\"));\n+ assertThat(result.get(2), equalTo(\"new_value\"));\n+ }\n+\n+ public void testNestedForEach() throws Exception {\n+ List<Map<String, Object>> values = new ArrayList<>();\n+ List<Object> innerValues = new ArrayList<>();\n+ innerValues.add(\"abc\");\n+ innerValues.add(\"def\");\n+ Map<String, Object> value = new HashMap<>();\n+ value.put(\"values2\", innerValues);\n+ values.add(value);\n+\n+ innerValues = new ArrayList<>();\n+ innerValues.add(\"ghi\");\n+ innerValues.add(\"jkl\");\n+ value = new HashMap<>();\n+ value.put(\"values2\", innerValues);\n+ values.add(value);\n+\n+ IngestDocument ingestDocument = new IngestDocument(\n+ \"_index\", \"_type\", \"_id\", null, null, null, null, Collections.singletonMap(\"values1\", values)\n+ );\n+\n+ TestProcessor testProcessor = new TestProcessor(\n+ doc -> doc.setFieldValue(\"_ingest._value\", doc.getFieldValue(\"_ingest._value\", String.class).toUpperCase(Locale.ENGLISH))\n+ );\n+ ForEachProcessor processor = new ForEachProcessor(\n+ \"_tag\", \"values1\", new ForEachProcessor(\"_tag\", \"_ingest._value.values2\", testProcessor));\n+ processor.execute(ingestDocument);\n+\n+ List<String> result = ingestDocument.getFieldValue(\"values1.0.values2\", List.class);\n+ assertThat(result.get(0), equalTo(\"ABC\"));\n+ assertThat(result.get(1), 
equalTo(\"DEF\"));\n+\n+ result = ingestDocument.getFieldValue(\"values1.1.values2\", List.class);\n+ assertThat(result.get(0), equalTo(\"GHI\"));\n+ assertThat(result.get(1), equalTo(\"JKL\"));\n+ }\n+\n }", "filename": "modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java", "status": "modified" }, { "diff": "@@ -19,7 +19,7 @@ teardown:\n \"field\" : \"values\",\n \"processor\" : {\n \"uppercase\" : {\n- \"field\" : \"_value\"\n+ \"field\" : \"_ingest._value\"\n }\n }\n }", "filename": "modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/80_foreach.yaml", "status": "modified" }, { "diff": "@@ -234,7 +234,7 @@\n \"processor\": {\n \"append\": {\n \"field\": \"values_flat\",\n- \"value\": \"{{_value.key}}_{{_value.value}}\"\n+ \"value\": \"{{_ingest._value.key}}_{{_ingest._value.value}}\"\n }\n }\n }", "filename": "qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yaml", "status": "modified" }, { "diff": "@@ -84,7 +84,7 @@\n \"field\" : \"friends\",\n \"processor\" : {\n \"remove\" : {\n- \"field\" : \"_value.id\"\n+ \"field\" : \"_ingest._value.id\"\n }\n }\n }\n@@ -106,7 +106,7 @@\n \"field\" : \"address\",\n \"processor\" : {\n \"trim\" : {\n- \"field\" : \"_value\"\n+ \"field\" : \"_ingest._value\"\n }\n }\n }", "filename": "qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/20_combine_processors.yaml", "status": "modified" } ] }
{ "body": "Best explained through some sense commands:\n\n``` json\nPOST my-index/my-type\n{ \"name\" : \"a\" }\n\nPOST my-index/my-type\n{ \"name\" : \"b\" }\n\n#works\nPOST my-index/my-type/_search\nPOST my-index/my-type/_count\n\n#works\nPOST my-index/my-type/_search\n{}\n\n#does not work\nPOST my-index/my-type/_count\n{}\n```\n\nThe latter will fail with:\n\n``` json\n{\n \"error\": {\n \"root_cause\": [\n {\n \"type\": \"parsing_exception\",\n \"reason\": \"Required query is missing\",\n \"line\": 1,\n \"col\": 2\n }\n ],\n \"type\": \"parsing_exception\",\n \"reason\": \"Required query is missing\",\n \"line\": 1,\n \"col\": 2\n },\n \"status\": 400\n}\n```\n\nwhich feels like a discrepency between the two API's.\n\nTested against current `master`\n", "comments": [ { "body": "I tested this on 2.3, the behaviour for `_count` and `_search` is the same there. I opened a PR to remove throwing the error when no `query` parameter is specified.\n", "created_at": "2016-07-26T08:14:34Z" } ], "number": 19422, "title": "_count requires query in body where _search does not" }
{ "body": "When the request body is missing, all documents in the target index are counted.\nAs mentioned in #19422, the same should happen when the request body is an empty\njson object. This is also the behaviour for the `_search` endpoint and the two\nAPIs should behave in the same way.\n\nCloses #19422 \n", "number": 19595, "review_comments": [ { "body": "maybe this should stay consistent with the exception that we throw whenever some deprecated syntax is used?\n", "created_at": "2016-07-26T11:46:03Z" }, { "body": "Ah, I understand, so this was `IllegalArgumentException` because thats what `ParseFieldmatcher.match()` uses for field names. I will revert this change and add a comment here to explain this.\n", "created_at": "2016-07-26T13:04:16Z" }, { "body": "sounds good thanks\n", "created_at": "2016-07-26T13:40:06Z" } ], "title": "Allow empty json object in request body in `_count` API" }
{ "commits": [ { "message": "Allow empty json object in request body in `_count` API\n\nWhen the request body is missing, all documents in the target index are counted.\nAs mentioned in #19422, the same should happen when the request body is an empty\njson object. This is also the behaviour for the `_search` endpoint and the two\nAPIs should behave in the same way." }, { "message": "Adding unit tests for QueryParseContext" } ], "files": [ { "diff": "@@ -73,9 +73,6 @@ public QueryBuilder parseTopLevelQueryBuilder() {\n }\n }\n }\n- if (queryBuilder == null) {\n- throw new ParsingException(parser.getTokenLocation(), \"Required query is missing\");\n- }\n return queryBuilder;\n } catch (ParsingException e) {\n throw e;\n@@ -113,7 +110,7 @@ public Optional<QueryBuilder> parseInnerQueryBuilder() throws IOException {\n // move to the next START_OBJECT\n token = parser.nextToken();\n if (token != XContentParser.Token.START_OBJECT && token != XContentParser.Token.START_ARRAY) {\n- throw new ParsingException(parser.getTokenLocation(), \"[_na] query malformed, no field after start_object\");\n+ throw new ParsingException(parser.getTokenLocation(), \"[_na] query malformed, no start_object after query name\");\n }\n @SuppressWarnings(\"unchecked\")\n Optional<QueryBuilder> result = (Optional<QueryBuilder>) indicesQueriesRegistry.lookup(queryName, parseFieldMatcher,", "filename": "core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java", "status": "modified" }, { "diff": "@@ -0,0 +1,128 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.index.query;\n+\n+import org.elasticsearch.common.ParseFieldMatcher;\n+import org.elasticsearch.common.ParsingException;\n+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.xcontent.XContentFactory;\n+import org.elasticsearch.common.xcontent.XContentParser;\n+import org.elasticsearch.common.xcontent.json.JsonXContent;\n+import org.elasticsearch.indices.query.IndicesQueriesRegistry;\n+import org.elasticsearch.search.SearchModule;\n+import org.elasticsearch.test.ESTestCase;\n+import org.junit.BeforeClass;\n+\n+import java.io.IOException;\n+import java.util.Optional;\n+\n+import static java.util.Collections.emptyList;\n+\n+public class QueryParseContextTests extends ESTestCase {\n+\n+ private static IndicesQueriesRegistry indicesQueriesRegistry;\n+\n+ @BeforeClass\n+ public static void init() {\n+ indicesQueriesRegistry = new SearchModule(Settings.EMPTY, new NamedWriteableRegistry(), false, emptyList())\n+ .getQueryParserRegistry();\n+ }\n+\n+ public void testParseTopLevelBuilder() throws IOException {\n+ QueryBuilder query = new MatchQueryBuilder(\"foo\", \"bar\");\n+ String requestBody = \"{ \\\"query\\\" : \" + query.toString() + \"}\";\n+ try (XContentParser parser = XContentFactory.xContent(requestBody).createParser(requestBody)) {\n+ QueryParseContext context = new QueryParseContext(indicesQueriesRegistry, parser, ParseFieldMatcher.STRICT);\n+ QueryBuilder actual = context.parseTopLevelQueryBuilder();\n+ assertEquals(query, actual);\n+ }\n+ }\n+\n+ public void testParseTopLevelBuilderEmptyObject() throws IOException {\n+ String requestBody = \"{}\";\n+ try (XContentParser parser = XContentFactory.xContent(requestBody).createParser(requestBody)) {\n+ QueryParseContext context = new QueryParseContext(indicesQueriesRegistry, parser, ParseFieldMatcher.STRICT);\n+ QueryBuilder query = context.parseTopLevelQueryBuilder();\n+ assertNull(query);\n+ }\n+ }\n+\n+ public void testParseTopLevelBuilderUnknownParameter() throws IOException {\n+ String requestBody = \"{ \\\"foo\\\" : \\\"bar\\\"}\";\n+ try (XContentParser parser = XContentFactory.xContent(requestBody).createParser(requestBody)) {\n+ QueryParseContext context = new QueryParseContext(indicesQueriesRegistry, parser, ParseFieldMatcher.STRICT);\n+ ParsingException exception = expectThrows(ParsingException.class, () -> context.parseTopLevelQueryBuilder());\n+ assertEquals(\"request does not support [foo]\", exception.getMessage());\n+ }\n+ }\n+\n+ public void testParseInnerQueryBuilder() throws IOException {\n+ QueryBuilder query = new MatchQueryBuilder(\"foo\", \"bar\");\n+ String source = query.toString();\n+ try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {\n+ QueryParseContext context = new QueryParseContext(indicesQueriesRegistry, parser, ParseFieldMatcher.STRICT);\n+ Optional<QueryBuilder> actual = context.parseInnerQueryBuilder();\n+ assertEquals(query, actual.get());\n+ }\n+ }\n+\n+ public void testParseInnerQueryBuilderEmptyBody() throws IOException {\n+ String source = \"{}\";\n+ try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {\n+ QueryParseContext context = new QueryParseContext(indicesQueriesRegistry, parser, ParseFieldMatcher.EMPTY);\n+ Optional<QueryBuilder> emptyQuery = context.parseInnerQueryBuilder();\n+ 
assertFalse(emptyQuery.isPresent());\n+ }\n+ }\n+\n+ public void testParseInnerQueryBuilderExceptions() throws IOException {\n+ String source = \"{ \\\"foo\\\": \\\"bar\\\" }\";\n+ try (XContentParser parser = JsonXContent.jsonXContent.createParser(source)) {\n+ parser.nextToken();\n+ parser.nextToken(); // don't start with START_OBJECT to provoke exception\n+ QueryParseContext context = new QueryParseContext(indicesQueriesRegistry, parser, ParseFieldMatcher.STRICT);\n+ ParsingException exception = expectThrows(ParsingException.class, () -> context.parseInnerQueryBuilder());\n+ assertEquals(\"[_na] query malformed, must start with start_object\", exception.getMessage());\n+ }\n+\n+ source = \"{}\";\n+ try (XContentParser parser = JsonXContent.jsonXContent.createParser(source)) {\n+ QueryParseContext context = new QueryParseContext(indicesQueriesRegistry, parser, ParseFieldMatcher.STRICT);\n+ IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> context.parseInnerQueryBuilder());\n+ assertEquals(\"query malformed, empty clause found at [1:2]\", exception.getMessage());\n+ }\n+\n+ source = \"{ \\\"foo\\\" : \\\"bar\\\" }\";\n+ try (XContentParser parser = JsonXContent.jsonXContent.createParser(source)) {\n+ QueryParseContext context = new QueryParseContext(indicesQueriesRegistry, parser, ParseFieldMatcher.STRICT);\n+ ParsingException exception = expectThrows(ParsingException.class, () -> context.parseInnerQueryBuilder());\n+ assertEquals(\"[_na] query malformed, no start_object after query name\", exception.getMessage());\n+ }\n+\n+ source = \"{ \\\"foo\\\" : {} }\";\n+ try (XContentParser parser = JsonXContent.jsonXContent.createParser(source)) {\n+ QueryParseContext context = new QueryParseContext(indicesQueriesRegistry, parser, ParseFieldMatcher.STRICT);\n+ ParsingException exception = expectThrows(ParsingException.class, () -> context.parseInnerQueryBuilder());\n+ assertEquals(\"no [query] registered for [foo]\", exception.getMessage());\n+ }\n+ }\n+\n+}", "filename": "core/src/test/java/org/elasticsearch/index/query/QueryParseContextTests.java", "status": "added" }, { "diff": "@@ -37,6 +37,24 @@ setup:\n \n - match: {count : 0}\n \n+---\n+\"count with empty body\":\n+# empty body should default to match_all query\n+ - do:\n+ count:\n+ index: test\n+ type: test\n+ body: { }\n+\n+ - match: {count : 1}\n+\n+ - do:\n+ count:\n+ index: test\n+ type: test\n+\n+ - match: {count : 1}\n+\n ---\n \"count body without query element\":\n - do:", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/count/10_basic.yaml", "status": "modified" } ] }
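The `_count` record above makes an empty `{}` body behave like a missing body. For callers of `parseTopLevelQueryBuilder()`, the practical consequence is that a `null` result now simply means "no query given". A tiny, hypothetical helper (assuming the Elasticsearch classes are on the classpath; this class is not part of the code base) captures the intended fallback:

```java
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

/** Hypothetical helper: a missing or empty body is treated as an explicit match_all. */
public final class CountQueryDefaults {

    private CountQueryDefaults() {}

    /** Returns the parsed top-level query, or match_all when the body contained no query. */
    public static QueryBuilder orMatchAll(QueryBuilder parsedTopLevelQuery) {
        return parsedTopLevelQuery == null ? QueryBuilders.matchAllQuery() : parsedTopLevelQuery;
    }
}
```

With such a fallback, `_count` with `{}` and `_count` with no body at all report the same total, which is the behaviour the added YAML test asserts.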
{ "body": "Previously when trying to listen on virtual interfaces during\nbootstrap the application would stop working - the interface\ncouldn't be found by the NetworkUtils class.\n\nThe NetworkUtils utilize the underlying JDK NetworkInterface\nclass which, when asked to lookup by name only takes physical\ninterfaces into account, failing at virtual (or subinterfaces)\nones (returning null).\n\nNote that when interating over all interfaces, both physical and\nvirtual ones are taken into account.\n\nThis changeset asks for all known interfaces, iterates over them\nand matches on the given name as part of the loop, allowing it\nto catch both physical and virtual interfaces.\n\nAs a result, elasticsearch can now also serve on virtual\ninterfaces.\n\nA test case has been added which at least makes sure that all\niterable interfaces can be found by their respective name. (It's\nnot easily possible in a unit test to \"fake\" virtual interfaces).\n\nCloses #17473\n", "comments": [ { "body": "Can one of the admins verify this patch?\n", "created_at": "2016-07-21T14:11:26Z" }, { "body": "I signed the CLA before sending this PR, maybe it didn't go \"through\" yet.\n", "created_at": "2016-07-21T14:12:02Z" }, { "body": "In case someone wants to verify, you can easily create a virtual interface on linux like:\n\n```\nsudo ifconfig eth0:1 10.2.16\n```\n\nNow in the config set the `network.host` to `_eth0:1_`. \n\nStarting 2.3.4 prints\n\n```\nException in thread \"main\" java.lang.IllegalArgumentException: No interface named 'eth0:1' found, got [name:lo (lo), name:eth0 (eth0), name:eth0:1 (eth0:1), name:eth1 (eth1)]\n at org.elasticsearch.common.network.NetworkUtils.getAddressesForInterface(NetworkUtils.java:232)\n at org.elasticsearch.common.network.NetworkService.resolveInternal(NetworkService.java:262)\n at org.elasticsearch.common.network.NetworkService.resolveInetAddresses(NetworkService.java:209)\n at org.elasticsearch.common.network.NetworkService.resolveBindHostAddresses(NetworkService.java:122)\n at org.elasticsearch.transport.netty.NettyTransport.bindServerBootstrap(NettyTransport.java:424)\n at org.elasticsearch.transport.netty.NettyTransport.doStart(NettyTransport.java:321)\n at org.elasticsearch.common.component.AbstractLifecycleComponent.start(AbstractLifecycleComponent.java:68)\n at org.elasticsearch.transport.TransportService.doStart(TransportService.java:182)\n at org.elasticsearch.common.component.AbstractLifecycleComponent.start(AbstractLifecycleComponent.java:68)\n at org.elasticsearch.node.Node.start(Node.java:278)\n at org.elasticsearch.bootstrap.Bootstrap.start(Bootstrap.java:206)\n at org.elasticsearch.bootstrap.Bootstrap.init(Bootstrap.java:272)\n at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:35)\n```\n\nwhile with the change applied:\n\n```\n[2016-07-21 14:08:00,095][INFO ][transport ] [Murmur II] publish_address {10.2.0.16:9300}, bound_addresses {10.2.0.16:9300}\n[2016-07-21 14:08:00,100][INFO ][bootstrap ] [Murmur II] bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks\n```\n", "created_at": "2016-07-21T14:18:31Z" }, { "body": "> It's not easily possible in a unit test to \"fake\" virtual interfaces\n\nThis is not right, testing is always possible. 
For example, this simple change adds a package-private method that can easily be tested with \"fake\" virtual interfaces:\n\n``` diff\ndiff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkUtils.java b/core/src/main/java/org/elasticsearch/common/network/NetworkUtils.java\nindex e15073e..6f098ef 100644\n--- a/core/src/main/java/org/elasticsearch/common/network/NetworkUtils.java\n+++ b/core/src/main/java/org/elasticsearch/common/network/NetworkUtils.java\n@@ -227,8 +227,12 @@ public abstract class NetworkUtils {\n\n /** Returns addresses for the given interface (it must be marked up) */\n static InetAddress[] getAddressesForInterface(String name) throws SocketException {\n+ return getAddressesForInterface(name, getInterfaces());\n+ }\n+\n+ static InetAddress[] getAddressesForInterface(String name, List<NetworkInterface> interfaces) throws SocketException {\n NetworkInterface intf = null;\n- for (NetworkInterface networkInterface : getInterfaces()) {\n+ for (NetworkInterface networkInterface : interfaces) {\n if (name.equals(networkInterface.getName())) {\n intf = networkInterface;\n break;\n@@ -236,7 +240,7 @@ public abstract class NetworkUtils {\n }\n\n if (intf == null) {\n- throw new IllegalArgumentException(\"No interface named '\" + name + \"' found, got \" + getInterfaces());\n+ throw new IllegalArgumentException(\"No interface named '\" + name + \"' found, got \" + interfaces);\n }\n if (!intf.isUp()) {\n throw new IllegalArgumentException(\"Interface '\" + name + \"' is not up and running\");\n```\n", "created_at": "2016-07-21T14:27:24Z" }, { "body": "@jasontedor gotcha, I was actually referring to faking an actual virtual interface that would reproduce the scenario when interacting with the original JVM methods. Any idea how that could be done?\n\nDo you think I should make the change you suggested? Not sure it would provide more coverage for the specific case in question.\n", "created_at": "2016-07-21T14:31:34Z" }, { "body": "The change is good. It should have been calling this method all along: it even calls it for error messages (making the bug more confusing, because it shows you the exact one you tried to set in the list).\n", "created_at": "2016-07-21T14:53:25Z" }, { "body": "> I was actually referring to faking an actual virtual interface that would reproduce the scenario when interacting with the original JVM methods. Any idea how that could be done?\n\nIt's fine to just mock the behavior of the JVM method as I suggested.\n\n> Do you think I should make the change you suggested?\n\nYes, it should be possible to write a test case that would fail without the change and will pass with your suggested change (which is good).\n", "created_at": "2016-07-21T14:57:34Z" }, { "body": "@jasontedor sounds good I'll make the changes :)\n", "created_at": "2016-07-21T15:16:06Z" }, { "body": "@jasontedor a quick follow up with some question/observations from my digging:\n\nLooks like it's not as easy as I thought because the `NetworkInterface` has no way of setting it to `virtual` - it only has those static initializers which don't provide the semantics we need here I think. Mocking it is also not an option since its a final class, and from the dependencies I can see that powermock is not used in ES right now (apologies if I missed a different way how ES allows to mock final classes, let me know if there is one).\n\nSo I propose doing one of the four things (please share your thoughts if there is something I have missed):\n1. 
Stick with the one test I added previously, which makes sure that all the listed interfaces (which includes virtual and physical) can also be looked up by name, this is not the case before the patch and the test case would previously fail on a system where there is a virtual interface defined.\n2. If there is a way to mock the final class in ES right now let me know, that is definitely an option.\n3. The most complex one I assume, but if there is a way we could setup a virtual interface if the box is a linux machine and then run the test.. but its probably a little out of scope for this level of testing. Is there a higher level test harness that could cover this case?\n4. Oh, and we could introduce another layer of indirection over the `NetworkInterface` so it is mockable.\n", "created_at": "2016-07-22T05:27:59Z" }, { "body": "I agree, I think NetworkInterface is mostly initialized via native code, we have no chance.\n\nTo be fair to @jasontedor's point, the bug probably happens because we don't test virtual interfaces :) \n\nBut at the same time, we should not try to do something super-elaborate, or compromise the code itself with useless abstractions to do it. We should just test what is reasonable.\n", "created_at": "2016-07-22T11:12:18Z" }, { "body": "> I agree, I think NetworkInterface is mostly initialized via native code, we have no chance.\n\nSadly, this is right. It is [possible](https://gist.github.com/jasontedor/a941c2f51393665234e9ff74dc91e42a) to get this tested, but I agree with Robert's point that it's probably not worth it for just this one test. If we were to go over this code and get it all tested, it might be worth doing what I suggest in this [gist](https://gist.github.com/jasontedor/a941c2f51393665234e9ff74dc91e42a), but it's too far here.\n", "created_at": "2016-07-22T11:49:02Z" }, { "body": "ok to test\n", "created_at": "2016-07-22T11:50:04Z" }, { "body": "Looks like the git checkout failed https://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+pull-request+multijob-intake/92/console\n", "created_at": "2016-07-22T12:21:03Z" }, { "body": "test this please\n", "created_at": "2016-07-22T13:36:24Z" }, { "body": "Thanks @daschl.\n", "created_at": "2016-07-22T16:33:29Z" }, { "body": "Thank you for fixing this, @daschl :)\n", "created_at": "2016-07-22T16:47:17Z" }, { "body": "@jasontedor and @daschl - this actually causes test failures if a virtual interface is not up:\n\n```\nSuite: org.elasticsearch.common.network.NetworkUtilsTests\n 2> REPRODUCE WITH: gradle :core:test -Dtests.seed=43BC27A5D082E629 -Dtests.class=org.elasticsearch.common.network.NetworkUtilsTests -Dtests.method=\"testAddressInterfaceLookup\" -Dtests.security.manager=true -Dtests.locale=es-PA -Dtests.timezone=America/Indiana/Indianapolis\nERROR 0.18s | NetworkUtilsTests.testAddressInterfaceLookup <<< FAILURES!\n > Throwable #1: java.lang.IllegalArgumentException: Interface 'virbr0' is not up and running\n > at __randomizedtesting.SeedInfo.seed([43BC27A5D082E629:C4C740E215C89F5F]:0)\n > at org.elasticsearch.common.network.NetworkUtils.getAddressesForInterface(NetworkUtils.java:242)\n > at org.elasticsearch.common.network.NetworkUtilsTests.testAddressInterfaceLookup(NetworkUtilsTests.java:90)\n > at java.lang.Thread.run(Thread.java:745)\n 2> NOTE: leaving temporary files on disk at: /home/hinmanm/es/elasticsearch/core/build/testrun/test/J0/temp/org.elasticsearch.common.network.NetworkUtilsTests_43BC27A5D082E629-001\n 2> NOTE: test params are: codec=Asserting(Lucene60): {}, docValues:{}, 
maxPointsInLeafNode=619, maxMBSortInHeap=7.4794493142308305, sim=RandomSimilarity(queryNorm=false,coord=no): {}, locale=es-PA, timezone=America/Indiana/Indianapolis\n 2> NOTE: Linux 4.6.3-300.fc24.x86_64 amd64/Oracle Corporation 1.8.0_77 (64-bit)/cpus=4,threads=1,free=481288632,total=514850816\n 2> NOTE: All tests run in this JVM: [NetworkUtilsTests]\nCompleted [1/1] in 1.20s, 1 test, 1 error <<< FAILURES!\n```\n\n(Reproduces every time for me), `virb0` is a virtual interface used for bridging VirtualBox VMs, but stays down while nothing is running.\n", "created_at": "2016-07-22T17:28:08Z" }, { "body": "Reverted in c27237be9fc70b077164e22705a17d25ac1e5d9f.\n", "created_at": "2016-07-22T17:30:26Z" }, { "body": "The test included in the fix here will blow up on any machine that has an interface that is down.\n", "created_at": "2016-07-22T17:33:09Z" }, { "body": "Okay good to know, I'll rework the test case on monday and resubmit it.\n", "created_at": "2016-07-22T18:05:36Z" } ], "number": 19537, "title": "Allow to listen on virtual interfaces." }
{ "body": "Previously when trying to listen on virtual interfaces during\nbootstrap the application would stop working - the interface\ncouldn't be found by the NetworkUtils class.\n\nThe NetworkUtils utilize the underlying JDK NetworkInterface\nclass which, when asked to lookup by name only takes physical\ninterfaces into account, failing at virtual (or subinterfaces)\nones (returning null).\n\nNote that when interating over all interfaces, both physical and\nvirtual ones are taken into account.\n\nThis changeset asks for all known interfaces, iterates over them\nand matches on the given name as part of the loop, allowing it\nto catch both physical and virtual interfaces.\n\nAs a result, elasticsearch can now also serve on virtual\ninterfaces.\n\nA test case has been added which makes sure that all\niterable interfaces can be found by their respective name.\n\nNote that this PR is a second iteration over the previously\nmerged but later reverted #19537 because it causes tests\nto fail when interfaces are down. The test has been modified\nto take this into account now.\n\nCloses #17473\nRelates #19537\n", "number": 19568, "review_comments": [ { "body": "wouldnt a stream using `getInterfaces().stream().filter().findFirst()` be nicer here?\n", "created_at": "2016-08-01T13:00:31Z" }, { "body": "you could use the `assumeTrue()` method to ignore a test if a condition is not given... not too sure if this increases readability, to be honest\n", "created_at": "2016-08-01T13:02:19Z" }, { "body": "wouldn't this change the semantics though in that if lets say the first one is down all the other potential test-able interfaces would be ignored?\n", "created_at": "2016-08-22T07:45:42Z" }, { "body": "oh I didn't realize the codebase was on java 8 - good idea!\n", "created_at": "2016-08-22T07:47:14Z" } ], "title": "Network: Allow to listen on virtual interfaces." }
{ "commits": [ { "message": "Network: Allow to listen on virtual interfaces.\n\nPreviously when trying to listen on virtual interfaces during\nbootstrap the application would stop working - the interface\ncouldn't be found by the NetworkUtils class.\n\nThe NetworkUtils utilize the underlying JDK NetworkInterface\nclass which, when asked to lookup by name only takes physical\ninterfaces into account, failing at virtual (or subinterfaces)\nones (returning null).\n\nNote that when interating over all interfaces, both physical and\nvirtual ones are taken into account.\n\nThis changeset asks for all known interfaces, iterates over them\nand matches on the given name as part of the loop, allowing it\nto catch both physical and virtual interfaces.\n\nAs a result, elasticsearch can now also serve on virtual\ninterfaces.\n\nA test case has been added which makes sure that all\niterable interfaces can be found by their respective name.\n\nNote that this PR is a second iteration over the previously\nmerged but later reverted #19537 because it causes tests\nto fail when interfaces are down. The test has been modified\nto take this into account now.\n\nCloses #17473\nRelates #19537" } ], "files": [ { "diff": "@@ -227,7 +227,14 @@ static InetAddress[] getAllAddresses() throws SocketException {\n \n /** Returns addresses for the given interface (it must be marked up) */\n static InetAddress[] getAddressesForInterface(String name) throws SocketException {\n- NetworkInterface intf = NetworkInterface.getByName(name);\n+ NetworkInterface intf = null;\n+ for (NetworkInterface networkInterface : getInterfaces()) {\n+ if (name.equals(networkInterface.getName())) {\n+ intf = networkInterface;\n+ break;\n+ }\n+ }\n+\n if (intf == null) {\n throw new IllegalArgumentException(\"No interface named '\" + name + \"' found, got \" + getInterfaces());\n }", "filename": "core/src/main/java/org/elasticsearch/common/network/NetworkUtils.java", "status": "modified" }, { "diff": "@@ -22,6 +22,10 @@\n import org.elasticsearch.test.ESTestCase;\n \n import java.net.InetAddress;\n+import java.net.NetworkInterface;\n+import java.util.Arrays;\n+import java.util.Collections;\n+import java.util.Enumeration;\n \n /**\n * Tests for network utils. Please avoid using any methods that cause DNS lookups!\n@@ -74,4 +78,24 @@ public void testFilter() throws Exception {\n assertArrayEquals(new InetAddress[] { InetAddress.getByName(\"127.0.0.1\") }, NetworkUtils.filterIPV4(addresses));\n assertArrayEquals(new InetAddress[] { InetAddress.getByName(\"::1\") }, NetworkUtils.filterIPV6(addresses));\n }\n+\n+ /**\n+ * Test that selecting by name is possible and properly matches the addresses on all interfaces and virtual\n+ * interfaces.\n+ *\n+ * Note that to avoid that this test fails when interfaces are down or they do not have addresses assigned to them,\n+ * they are ignored.\n+ */\n+ public void testAddressInterfaceLookup() throws Exception {\n+ for (NetworkInterface netIf : NetworkUtils.getInterfaces()) {\n+ if (!netIf.isUp() || Collections.list(netIf.getInetAddresses()).isEmpty()) {\n+ continue;\n+ }\n+\n+ String name = netIf.getName();\n+ InetAddress[] expectedAddresses = Collections.list(netIf.getInetAddresses()).toArray(new InetAddress[0]);\n+ InetAddress[] foundAddresses = NetworkUtils.getAddressesForInterface(name);\n+ assertArrayEquals(expectedAddresses, foundAddresses);\n+ }\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/common/network/NetworkUtilsTests.java", "status": "modified" } ] }
{ "body": "<!--\nGitHub is reserved for bug reports and feature requests. The best place\nto ask a general question is at the Elastic Discourse forums at\nhttps://discuss.elastic.co. If you are in fact posting a bug report or\na feature request, please include one and only one of the below blocks\nin your new issue.\n-->\n\n<!--\nIf you are filing a bug report, please remove the below feature\nrequest block and provide responses for all of the below items.\n-->\n\n**Elasticsearch version**: 2.3.3\n\n**JVM version**: 1.8.0_45\n\n**OS version**: OS X 10.11.5\n\n**Description of the problem including expected versus actual behavior**:\nBulk api with updates respond _shards.successful > 0 even if new _source not different from the old _source.\n\n**Steps to reproduce**:\n\n```\nnano request\n{ \"update\" : { \"_index\" : \"test\", \"_type\" : \"test\", \"_id\" : \"1\" } }\n{ \"doc\" : {\"data\" : \"some_data\"}, \"doc_as_upsert\" : true }\n\n# returning _shards.successful = 1\ncurl -XPOST 'localhost:9200/_bulk' --data-binary \"@requests\";\n\n# sending request again. This should return _shards.successful = 0 but instead returns _shards.successful = 1\ncurl -XPOST 'localhost:9200/_bulk' --data-binary \"@requests\";\n```\n\nHere is example with _update api\n\n```\n# returns _shards.successful = 1\ncurl -XPOST localhost:9200/test/test/1/_update?pretty -d '{ \"doc\" : {\"data\" : \"some_data\"}, \"doc_as_upsert\" : true }'\n\n# sending request again. Returns _shards.successful = 0\ncurl -XPOST localhost:9200/test/test/1/_update?pretty -d '{ \"doc\" : {\"data\" : \"some_data\"}, \"doc_as_upsert\" : true }'\n```\n", "comments": [ { "body": "The `update` API should change to return number of successful shards.\n", "created_at": "2016-07-08T09:15:45Z" }, { "body": "@clintongormley \nThere was a noop (data was the same), that is why shards was not updated and _shards.successful = 0.\nBut the point is, that `bulk` api with `update` request in it, should detect noop too and return _shards.successful = 0 in the second call too. That is why `bulk` API with `update` in it, should be changed and return _shards.successful = 0 after update with same `_source`. Am I correct?\n", "created_at": "2016-07-08T12:16:50Z" }, { "body": "@gkozyryatskyy sure - you could argue it either way. I think it is more meaningful to say that the request was processed correctly for this shard, even though it is a noop\n", "created_at": "2016-07-08T16:32:15Z" }, { "body": "@clintongormley \nIf it will return _shards.successful = 1for the same `_source`(request was processed correctly for this shard, even though it is a noop), it will be impossible to understand whether the data was reindexed (i.e does `_source` in `update` request changes the existed `_source`?). Exactly this functionality I need in `bulk` + `update` request. =)\n", "created_at": "2016-07-08T17:40:20Z" }, { "body": "@gkozyryatskyy a better way of doing this would be in this PR https://github.com/elastic/elasticsearch/pull/9736 which was closed only because of a lack of response. We'd still accept this feature if somebody wanted to pick up where this PR left off.\n", "created_at": "2016-07-11T13:30:12Z" }, { "body": "@clintongormley \nPR #9736 seems really a better way. It will be great if you can reopen/merge this feature.\n", "created_at": "2016-07-11T13:49:44Z" } ], "number": 19267, "title": "Updates in bulk api always update shards." }
{ "body": "Revival of #9736\nCloses #9642\nCloses #19267\n\nI attempted to follow the steps suggested by @nik9000 in #9736. \nI was unsure of whether the IndexResponse#isCreated method and \"created\": field should be changed, as the version of ES is way past 2.0.0, when the original comment was written, so I left them.\n\nPerforming the bulk request shown in #19267 now results in the following:\n`{\"_index\":\"test\",\"_type\":\"test\",\"_id\":\"1\",\"_version\":1,\"_operation\":\"create\",\"forced_refresh\":false,\"_shards\":{\"total\":2,\"successful\":1,\"failed\":0},\"status\":201}`\n`{\"_index\":\"test\",\"_type\":\"test\",\"_id\":\"1\",\"_version\":1,\"_operation\":\"noop\",\"forced_refresh\":false,\"_shards\":{\"total\":2,\"successful\":1,\"failed\":0},\"status\":200}`\n", "number": 19566, "review_comments": [ { "body": "These `Fields` classes have fallen out of favor in the past six months. Rather than add a new constant I'd just add a new string below, similar to what I did when I added `forced_refresh`. If it doesn't bloat the PR too much you can remove the whole `Fields` class and replace with strings at the call sites.\n", "created_at": "2016-07-25T12:58:10Z" }, { "body": "I think you mean `fromOperation`?\n\nI think it'd be slightly better to have `Operation` implement `Writeable` and call this method `readFrom` and have it take the `StreamInput` instead. At least, that is how we've been doing these enums in the past few months.\n", "created_at": "2016-07-25T13:00:29Z" }, { "body": "nit: missing space\n", "created_at": "2016-07-25T13:00:46Z" }, { "body": "I'd just leave the ternary operation there.\n", "created_at": "2016-07-25T13:02:16Z" }, { "body": "I think you can just blast the entire method in this case.\n", "created_at": "2016-07-25T13:02:33Z" }, { "body": "I don't think you need to keep the `found` part of the `toString`. I'd just nuke this line.\n", "created_at": "2016-07-25T13:03:07Z" }, { "body": "If you always want to use the lowercased version of the constant maybe just implement `toString` to return lowercased? I'm not sure what the right thing is here but it isn't a big deal either way.\n", "created_at": "2016-07-25T13:04:10Z" }, { "body": "I don't think you need this. The superclass already has this behavior, right?\n", "created_at": "2016-07-25T13:05:24Z" }, { "body": "Again, I wouldn't pull out the ternary.\n", "created_at": "2016-07-25T13:05:34Z" }, { "body": "Ooooh - maybe we should just delete to `getOperation().status()`? We could probably move this whole method up to the superclass then.\n", "created_at": "2016-07-25T13:07:03Z" }, { "body": "I think you can remove the whole override.\n", "created_at": "2016-07-25T13:07:13Z" }, { "body": "Same feedback as the last `toString` - I think you can remove `created` and might want to look at `Operation`'s `toString`.\n", "created_at": "2016-07-25T13:07:50Z" }, { "body": "Thanks for removing this.\n", "created_at": "2016-07-25T13:08:54Z" }, { "body": "I wonder if with this change you can remove `UpdateHelper.Operation` entirely and just use `DocWriteResult.Operation`. I'm not sure it'd be clear to use `CREATE` instead of `UPSERT` in all the places though.\n", "created_at": "2016-07-25T13:11:52Z" }, { "body": "Same deal as the last `toString`.\n", "created_at": "2016-07-25T13:14:41Z" }, { "body": "I'm tempted to remove `isCreated` and just use `getOperation` everywhere. 
Might make more sense to do in a followup PR because it is a lot of small mechanical changes.\n", "created_at": "2016-07-25T13:16:19Z" }, { "body": "This is different for IndexResponse/UpdateResponse and DeleteResponse; index and update check for Operation.CREATED, whereas DeleteResponse is looking for the absence of Operation.DELETED. We could make this one default and override it in DeleteResponse though, although it might be more explicit to have them in each subclass?\n", "created_at": "2016-07-26T06:29:33Z" }, { "body": "I think i'll shove this in a follow-up PR if that's okay. There are a lot of tests that use this method.\n", "created_at": "2016-07-26T06:30:58Z" }, { "body": "Since it is defined on the superclass it'll still be there, just not a delegates-to-superclass override.\n", "created_at": "2016-07-26T12:54:47Z" }, { "body": "I see it now - I think how you've got it now is the most right thing then.\n", "created_at": "2016-07-26T12:56:31Z" } ], "title": "Adding _operation field to index, update, delete response." }
{ "commits": [ { "message": "Add noop property to UpdateResponse\n\nConflicts:\n\tsrc/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java" }, { "message": "Added _operation field" }, { "message": "fixing style & test cases" } ], "files": [ { "diff": "@@ -24,36 +24,91 @@\n import org.elasticsearch.action.support.replication.ReplicationResponse;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n+import org.elasticsearch.common.io.stream.Writeable;\n import org.elasticsearch.common.xcontent.StatusToXContent;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.index.IndexSettings;\n import org.elasticsearch.index.shard.ShardId;\n import org.elasticsearch.rest.RestStatus;\n \n import java.io.IOException;\n+import java.util.Locale;\n \n /**\n * A base class for the response of a write operation that involves a single doc\n */\n public abstract class DocWriteResponse extends ReplicationResponse implements WriteResponse, StatusToXContent {\n \n+ public enum Operation implements Writeable {\n+ CREATE(0),\n+ INDEX(1),\n+ DELETE(2),\n+ NOOP(3);\n+\n+ private final byte op;\n+ private final String lowercase;\n+\n+ Operation(int op) {\n+ this.op = (byte) op;\n+ this.lowercase = this.toString().toLowerCase(Locale.ENGLISH);\n+ }\n+\n+ public byte getOp() {\n+ return op;\n+ }\n+\n+ public String getLowercase() {\n+ return lowercase;\n+ }\n+\n+ public static Operation readFrom(StreamInput in) throws IOException{\n+ Byte opcode = in.readByte();\n+ switch(opcode){\n+ case 0:\n+ return CREATE;\n+ case 1:\n+ return INDEX;\n+ case 2:\n+ return DELETE;\n+ case 3:\n+ return NOOP;\n+ default:\n+ throw new IllegalArgumentException(\"Unknown operation code: \" + opcode);\n+ }\n+ }\n+\n+ @Override\n+ public void writeTo(StreamOutput out) throws IOException {\n+ out.writeByte(op);\n+ }\n+ }\n+\n private ShardId shardId;\n private String id;\n private String type;\n private long version;\n private boolean forcedRefresh;\n+ protected Operation operation;\n \n- public DocWriteResponse(ShardId shardId, String type, String id, long version) {\n+ public DocWriteResponse(ShardId shardId, String type, String id, long version, Operation operation) {\n this.shardId = shardId;\n this.type = type;\n this.id = id;\n this.version = version;\n+ this.operation = operation;\n }\n \n // needed for deserialization\n protected DocWriteResponse() {\n }\n \n+ /**\n+ * The change that occurred to the document.\n+ */\n+ public Operation getOperation() {\n+ return operation;\n+ }\n+\n /**\n * The index the document was changed in.\n */\n@@ -118,6 +173,7 @@ public void readFrom(StreamInput in) throws IOException {\n id = in.readString();\n version = in.readZLong();\n forcedRefresh = in.readBoolean();\n+ operation = Operation.readFrom(in);\n }\n \n @Override\n@@ -128,22 +184,17 @@ public void writeTo(StreamOutput out) throws IOException {\n out.writeString(id);\n out.writeZLong(version);\n out.writeBoolean(forcedRefresh);\n- }\n-\n- static final class Fields {\n- static final String _INDEX = \"_index\";\n- static final String _TYPE = \"_type\";\n- static final String _ID = \"_id\";\n- static final String _VERSION = \"_version\";\n+ operation.writeTo(out);\n }\n \n @Override\n public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {\n ReplicationResponse.ShardInfo shardInfo = getShardInfo();\n- builder.field(Fields._INDEX, shardId.getIndexName())\n- .field(Fields._TYPE, type)\n- 
.field(Fields._ID, id)\n- .field(Fields._VERSION, version)\n+ builder.field(\"_index\", shardId.getIndexName())\n+ .field(\"_type\", type)\n+ .field(\"_id\", id)\n+ .field(\"_version\", version)\n+ .field(\"_operation\", getOperation().getLowercase())\n .field(\"forced_refresh\", forcedRefresh);\n shardInfo.toXContent(builder, params);\n return builder;", "filename": "core/src/main/java/org/elasticsearch/action/DocWriteResponse.java", "status": "modified" }, { "diff": "@@ -248,7 +248,7 @@ private Tuple<Translog.Location, BulkItemRequest> update(IndexMetaData metaData,\n BytesReference indexSourceAsBytes = indexRequest.source();\n // add the response\n IndexResponse indexResponse = result.getResponse();\n- UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.isCreated());\n+ UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.getOperation());\n if (updateRequest.fields() != null && updateRequest.fields().length > 0) {\n Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true);\n updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes));\n@@ -261,7 +261,7 @@ private Tuple<Translog.Location, BulkItemRequest> update(IndexMetaData metaData,\n WriteResult<DeleteResponse> writeResult = updateResult.writeResult;\n DeleteResponse response = writeResult.getResponse();\n DeleteRequest deleteRequest = updateResult.request();\n- updateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), false);\n+ updateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getOperation());\n updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), updateResult.result.updatedSourceAsMap(), updateResult.result.updateSourceContentType(), null));\n // Replace the update request to the translated delete request to execute on the replica.\n item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest);", "filename": "core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java", "status": "modified" }, { "diff": "@@ -20,8 +20,6 @@\n package org.elasticsearch.action.delete;\n \n import org.elasticsearch.action.DocWriteResponse;\n-import org.elasticsearch.common.io.stream.StreamInput;\n-import org.elasticsearch.common.io.stream.StreamOutput;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.index.shard.ShardId;\n import org.elasticsearch.rest.RestStatus;\n@@ -36,52 +34,29 @@\n */\n public class DeleteResponse extends DocWriteResponse {\n \n- private boolean found;\n-\n public DeleteResponse() {\n \n }\n \n public DeleteResponse(ShardId shardId, String type, String id, long version, boolean found) {\n- super(shardId, type, id, version);\n- this.found = found;\n+ super(shardId, type, id, version, found ? 
Operation.DELETE : Operation.NOOP);\n }\n \n-\n /**\n * Returns <tt>true</tt> if a doc was found to delete.\n */\n public boolean isFound() {\n- return found;\n- }\n-\n- @Override\n- public void readFrom(StreamInput in) throws IOException {\n- super.readFrom(in);\n- found = in.readBoolean();\n- }\n-\n- @Override\n- public void writeTo(StreamOutput out) throws IOException {\n- super.writeTo(out);\n- out.writeBoolean(found);\n+ return operation == Operation.DELETE;\n }\n \n @Override\n public RestStatus status() {\n- if (found == false) {\n- return RestStatus.NOT_FOUND;\n- }\n- return super.status();\n- }\n-\n- static final class Fields {\n- static final String FOUND = \"found\";\n+ return isFound() ? super.status() : RestStatus.NOT_FOUND;\n }\n \n @Override\n public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {\n- builder.field(Fields.FOUND, isFound());\n+ builder.field(\"found\", isFound());\n super.toXContent(builder, params);\n return builder;\n }\n@@ -94,7 +69,7 @@ public String toString() {\n builder.append(\",type=\").append(getType());\n builder.append(\",id=\").append(getId());\n builder.append(\",version=\").append(getVersion());\n- builder.append(\",found=\").append(found);\n+ builder.append(\",operation=\").append(getOperation().getLowercase());\n builder.append(\",shards=\").append(getShardInfo());\n return builder.append(\"]\").toString();\n }", "filename": "core/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java", "status": "modified" }, { "diff": "@@ -36,42 +36,24 @@\n */\n public class IndexResponse extends DocWriteResponse {\n \n- private boolean created;\n-\n public IndexResponse() {\n \n }\n \n public IndexResponse(ShardId shardId, String type, String id, long version, boolean created) {\n- super(shardId, type, id, version);\n- this.created = created;\n+ super(shardId, type, id, version, created ? Operation.CREATE : Operation.INDEX);\n }\n \n /**\n * Returns true if the document was created, false if updated.\n */\n public boolean isCreated() {\n- return this.created;\n+ return this.operation == Operation.CREATE;\n }\n \n @Override\n public RestStatus status() {\n- if (created) {\n- return RestStatus.CREATED;\n- }\n- return super.status();\n- }\n-\n- @Override\n- public void readFrom(StreamInput in) throws IOException {\n- super.readFrom(in);\n- created = in.readBoolean();\n- }\n-\n- @Override\n- public void writeTo(StreamOutput out) throws IOException {\n- super.writeTo(out);\n- out.writeBoolean(created);\n+ return isCreated() ? 
RestStatus.CREATED : super.status();\n }\n \n @Override\n@@ -82,19 +64,15 @@ public String toString() {\n builder.append(\",type=\").append(getType());\n builder.append(\",id=\").append(getId());\n builder.append(\",version=\").append(getVersion());\n- builder.append(\",created=\").append(created);\n+ builder.append(\",operation=\").append(getOperation().getLowercase());\n builder.append(\",shards=\").append(getShardInfo());\n return builder.append(\"]\").toString();\n }\n \n- static final class Fields {\n- static final String CREATED = \"created\";\n- }\n-\n @Override\n public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {\n super.toXContent(builder, params);\n- builder.field(Fields.CREATED, isCreated());\n+ builder.field(\"created\", isCreated());\n return builder;\n }\n }", "filename": "core/src/main/java/org/elasticsearch/action/index/IndexResponse.java", "status": "modified" }, { "diff": "@@ -22,6 +22,7 @@\n import org.elasticsearch.ExceptionsHelper;\n import org.elasticsearch.action.ActionListener;\n import org.elasticsearch.action.ActionRunnable;\n+import org.elasticsearch.action.DocWriteResponse;\n import org.elasticsearch.action.RoutingMissingException;\n import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;\n import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;\n@@ -185,7 +186,7 @@ protected void shardOperation(final UpdateRequest request, final ActionListener<\n indexAction.execute(upsertRequest, new ActionListener<IndexResponse>() {\n @Override\n public void onResponse(IndexResponse response) {\n- UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.isCreated());\n+ UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getOperation());\n if (request.fields() != null && request.fields().length > 0) {\n Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(upsertSourceBytes, true);\n update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes));\n@@ -223,7 +224,7 @@ protected void doRun() {\n indexAction.execute(indexRequest, new ActionListener<IndexResponse>() {\n @Override\n public void onResponse(IndexResponse response) {\n- UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.isCreated());\n+ UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getOperation());\n update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes));\n update.setForcedRefresh(response.forcedRefresh());\n listener.onResponse(update);\n@@ -252,7 +253,7 @@ protected void doRun() {\n deleteAction.execute(deleteRequest, new ActionListener<DeleteResponse>() {\n @Override\n public void onResponse(DeleteResponse response) {\n- UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), false);\n+ UpdateResponse update = new 
UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getOperation());\n update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null));\n update.setForcedRefresh(response.forcedRefresh());\n listener.onResponse(update);", "filename": "core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java", "status": "modified" }, { "diff": "@@ -116,7 +116,7 @@ protected Result prepare(ShardId shardId, UpdateRequest request, final GetResult\n request.script.getScript());\n }\n UpdateResponse update = new UpdateResponse(shardId, getResult.getType(), getResult.getId(),\n- getResult.getVersion(), false);\n+ getResult.getVersion(), UpdateResponse.convert(Operation.NONE));\n update.setGetResult(getResult);\n return new Result(update, Operation.NONE, upsertDoc, XContentType.JSON);\n }\n@@ -234,12 +234,12 @@ protected Result prepare(ShardId shardId, UpdateRequest request, final GetResult\n .setRefreshPolicy(request.getRefreshPolicy());\n return new Result(deleteRequest, Operation.DELETE, updatedSourceAsMap, updateSourceContentType);\n } else if (\"none\".equals(operation)) {\n- UpdateResponse update = new UpdateResponse(shardId, getResult.getType(), getResult.getId(), getResult.getVersion(), false);\n+ UpdateResponse update = new UpdateResponse(shardId, getResult.getType(), getResult.getId(), getResult.getVersion(), UpdateResponse.convert(Operation.NONE));\n update.setGetResult(extractGetResult(request, request.index(), getResult.getVersion(), updatedSourceAsMap, updateSourceContentType, getResult.internalSourceRef()));\n return new Result(update, Operation.NONE, updatedSourceAsMap, updateSourceContentType);\n } else {\n logger.warn(\"Used update operation [{}] for script [{}], doing nothing...\", operation, request.script.getScript());\n- UpdateResponse update = new UpdateResponse(shardId, getResult.getType(), getResult.getId(), getResult.getVersion(), false);\n+ UpdateResponse update = new UpdateResponse(shardId, getResult.getType(), getResult.getId(), getResult.getVersion(), UpdateResponse.convert(Operation.NONE));\n return new Result(update, Operation.NONE, updatedSourceAsMap, updateSourceContentType);\n }\n }", "filename": "core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java", "status": "modified" }, { "diff": "@@ -29,11 +29,8 @@\n \n import java.io.IOException;\n \n-/**\n- */\n public class UpdateResponse extends DocWriteResponse {\n \n- private boolean created;\n private GetResult getResult;\n \n public UpdateResponse() {\n@@ -43,14 +40,28 @@ public UpdateResponse() {\n * Constructor to be used when a update didn't translate in a write.\n * For example: update script with operation set to none\n */\n- public UpdateResponse(ShardId shardId, String type, String id, long version, boolean created) {\n- this(new ShardInfo(0, 0), shardId, type, id, version, created);\n+ public UpdateResponse(ShardId shardId, String type, String id, long version, Operation operation) {\n+ this(new ShardInfo(0, 0), shardId, type, id, version, operation);\n }\n \n- public UpdateResponse(ShardInfo shardInfo, ShardId shardId, String type, String id, long version, boolean created) {\n- super(shardId, type, id, version);\n+ public UpdateResponse(ShardInfo shardInfo, ShardId shardId, String type, String id,\n+ long version, Operation operation) {\n+ super(shardId, type, id, version, operation);\n 
setShardInfo(shardInfo);\n- this.created = created;\n+ }\n+\n+ public static Operation convert(UpdateHelper.Operation op) {\n+ switch(op) {\n+ case UPSERT:\n+ return Operation.CREATE;\n+ case INDEX:\n+ return Operation.INDEX;\n+ case DELETE:\n+ return Operation.DELETE;\n+ case NONE:\n+ return Operation.NOOP;\n+ }\n+ throw new IllegalArgumentException();\n }\n \n public void setGetResult(GetResult getResult) {\n@@ -65,22 +76,17 @@ public GetResult getGetResult() {\n * Returns true if document was created due to an UPSERT operation\n */\n public boolean isCreated() {\n- return this.created;\n-\n+ return this.operation == Operation.CREATE;\n }\n \n @Override\n public RestStatus status() {\n- if (created) {\n- return RestStatus.CREATED;\n- }\n- return super.status();\n+ return isCreated() ? RestStatus.CREATED : super.status();\n }\n \n @Override\n public void readFrom(StreamInput in) throws IOException {\n super.readFrom(in);\n- created = in.readBoolean();\n if (in.readBoolean()) {\n getResult = GetResult.readGetResult(in);\n }\n@@ -89,7 +95,6 @@ public void readFrom(StreamInput in) throws IOException {\n @Override\n public void writeTo(StreamOutput out) throws IOException {\n super.writeTo(out);\n- out.writeBoolean(created);\n if (getResult == null) {\n out.writeBoolean(false);\n } else {\n@@ -122,7 +127,7 @@ public String toString() {\n builder.append(\",type=\").append(getType());\n builder.append(\",id=\").append(getId());\n builder.append(\",version=\").append(getVersion());\n- builder.append(\",created=\").append(created);\n+ builder.append(\",operation=\").append(getOperation().getLowercase());\n builder.append(\",shards=\").append(getShardInfo());\n return builder.append(\"]\").toString();\n }", "filename": "core/src/main/java/org/elasticsearch/action/update/UpdateResponse.java", "status": "modified" }, { "diff": "@@ -57,7 +57,7 @@ $ cat requests\n { \"index\" : { \"_index\" : \"test\", \"_type\" : \"type1\", \"_id\" : \"1\" } }\n { \"field1\" : \"value1\" }\n $ curl -s -XPOST localhost:9200/_bulk --data-binary \"@requests\"; echo\n-{\"took\":7,\"items\":[{\"create\":{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\"1\",\"_version\":1}}]}\n+{\"took\":7, \"errors\": false, \"items\":[{\"index\":{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\"1\",\"_version\":1,\"_operation\":\"create\",\"forced_refresh\":false}}]}\n --------------------------------------------------\n \n Because this format uses literal `\\n`'s as delimiters, please be sure", "filename": "docs/reference/docs/bulk.asciidoc", "status": "modified" }, { "diff": "@@ -25,7 +25,8 @@ The result of the above delete operation is:\n \"_index\" : \"twitter\",\n \"_type\" : \"tweet\",\n \"_id\" : \"1\",\n- \"_version\" : 2\n+ \"_version\" : 2,\n+ \"_operation: delete\"\n }\n --------------------------------------------------\n ", "filename": "docs/reference/docs/delete.asciidoc", "status": "modified" }, { "diff": "@@ -31,6 +31,7 @@ The result of the above index operation is:\n \"_id\" : \"1\",\n \"_version\" : 1,\n \"created\" : true,\n+ \"_operation\" : create,\n \"forced_refresh\": false\n }\n --------------------------------------------------\n@@ -231,6 +232,7 @@ The result of the above index operation is:\n \"_id\" : \"6a8ca01c-7896-48e9-81cc-9f70661fcb32\",\n \"_version\" : 1,\n \"created\" : true,\n+ \"_operation\": \"create\",\n \"forced_refresh\": false\n }\n --------------------------------------------------", "filename": "docs/reference/docs/index_.asciidoc", "status": "modified" }, { "diff": "@@ -132,8 +132,20 @@ curl 
-XPOST 'localhost:9200/test/type1/1/_update' -d '{\n }'\n --------------------------------------------------\n \n-If `name` was `new_name` before the request was sent then document is still\n-reindexed.\n+If `name` was `new_name` before the request was sent then the entire update\n+request is ignored. The `operation` element in the response returns `noop` if\n+the request was ignored.\n+\n+[source,js]\n+--------------------------------------------------\n+{\n+ \"_index\": \"test\",\n+ \"_type\": \"type1\",\n+ \"_id\": \"1\",\n+ \"_version\": 1,\n+ \"_operation\": noop\n+}\n+--------------------------------------------------\n \n [[upserts]]\n [float]", "filename": "docs/reference/docs/update.asciidoc", "status": "modified" }, { "diff": "@@ -314,7 +314,7 @@ public ScheduledFuture<?> schedule(TimeValue delay, String name, Runnable comman\n };\n ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), 0, emptyList(), null);\n simulateScrollResponse(new DummyAbstractAsyncBulkByScrollAction(), timeValueNanos(System.nanoTime()), 10, response);\n- ExecutionException e = expectThrows(ExecutionException.class, () -> listener.get()); \n+ ExecutionException e = expectThrows(ExecutionException.class, () -> listener.get());\n assertThat(e.getMessage(), equalTo(\"EsRejectedExecutionException[test]\"));\n assertThat(client.scrollsCleared, contains(scrollId));\n \n@@ -773,7 +773,7 @@ RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>>\n UpdateRequest update = (UpdateRequest) item;\n opType = \"update\";\n response = new UpdateResponse(shardId, update.type(), update.id(),\n- randomIntBetween(0, Integer.MAX_VALUE), true);\n+ randomIntBetween(0, Integer.MAX_VALUE), DocWriteResponse.Operation.CREATE);\n } else if (item instanceof DeleteRequest) {\n DeleteRequest delete = (DeleteRequest) item;\n opType = \"delete\";", "filename": "modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java", "status": "modified" }, { "diff": "@@ -0,0 +1,26 @@\n+---\n+\"Delete operation field\":\n+\n+ - do:\n+ index:\n+ index: test_1\n+ type: test\n+ id: 1\n+ body: { foo: bar }\n+\n+ - do:\n+ delete:\n+ index: test_1\n+ type: test\n+ id: 1\n+\n+ - match: { _operation: delete }\n+\n+ - do:\n+ catch: missing\n+ delete:\n+ index: test_1\n+ type: test\n+ id: 1\n+\n+ - match: { _operation: noop }", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/delete/12_operation.yaml", "status": "added" }, { "diff": "@@ -0,0 +1,21 @@\n+---\n+\"Index operation field\":\n+\n+ - do:\n+ index:\n+ index: test_index\n+ type: test\n+ id: 1\n+ body: { foo: bar }\n+\n+ - match: { _operation: create }\n+\n+ - do:\n+ index:\n+ index: test_index\n+ type: test\n+ id: 1\n+ body: { foo: bar }\n+ op_type: index\n+\n+ - match: { _operation: index }", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/index/12_operation.yaml", "status": "added" }, { "diff": "@@ -0,0 +1,52 @@\n+---\n+\"Update operation field\":\n+\n+ - do:\n+ update:\n+ index: test_1\n+ type: test\n+ id: 1\n+ body:\n+ doc: { foo: bar }\n+ doc_as_upsert: true\n+\n+ - match: { _version: 1 }\n+ - match: { _operation: create }\n+\n+ - do:\n+ update:\n+ index: test_1\n+ type: test\n+ id: 1\n+ body:\n+ doc: { foo: bar }\n+ doc_as_upsert: true\n+\n+ - match: { _version: 1 }\n+ - match: { _operation: noop }\n+\n+ - do:\n+ update:\n+ index: test_1\n+ type: test\n+ id: 1\n+ body:\n+ doc: { foo: bar }\n+ doc_as_upsert: true\n+ detect_noop: false\n+\n+ - match: { _version: 2 }\n+ - 
match: { _operation: index }\n+\n+ - do:\n+ update:\n+ index: test_1\n+ type: test\n+ id: 1\n+ body:\n+ doc: { foo: baz }\n+ doc_as_upsert: true\n+ detect_noop: true\n+\n+ - match: { _version: 3 }\n+ - match: { _operation: index }", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/update/12_operation.yaml", "status": "added" } ] }
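The YAML tests above exercise the new `_operation` value over REST. On the Java side, a caller could branch on `DocWriteResponse.Operation` to get the noop detection that issue #19267 asked for in the bulk case; the helper below is a hypothetical sketch (not part of this PR) that relies only on the enum and the `getOperation()` accessor introduced in the diff:

```java
import org.elasticsearch.action.DocWriteResponse;

/**
 * Hypothetical caller-side helper showing how the new _operation value lets a
 * client distinguish real writes from noops, instead of inspecting the old
 * created/found booleans or _shards.successful.
 */
class WriteResultInspector {

    static boolean changedTheIndex(DocWriteResponse response) {
        switch (response.getOperation()) {
            case CREATE:   // upsert created a new document
            case INDEX:    // an existing document was reindexed
            case DELETE:   // the document was removed
                return true;
            case NOOP:     // detect_noop found an identical source, nothing was reindexed
                return false;
            default:
                throw new IllegalArgumentException("unexpected operation: " + response.getOperation());
        }
    }
}
```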
{ "body": "There is a discrepancy in the behaviour of `multi_match` query when it comes to resolving field names containing wildcard characters.\n\nThe following query will return zero results if the field `subject` does not exists.\n\n```\n{\n \"multi_match\" : {\n \"query\": \"this is a test\", \n \"fields\": [ \"subject\" ] \n }\n}\n```\n\nBut the following query will fail with an exception saying `\"No fields specified for multi_match query\"` if there is no field whose name starts with \"ms\" in the index being queried. Ideally it should return zero results too instead of failing right?\n\n```\n{\n \"multi_match\" : {\n \"query\": \"this is a test\", \n \"fields\": [ \"ms*\" ] \n }\n}\n```\n\nI have a use case where fields are dynamically added to the mapping and users can provide the field name to match search text on. Fields added to the mapping also have a metadata string appended to their names which the user may not be aware about. Hence I need to use wildcards in the field name in a `multi_match` query and there are chances that no field name matches too.\n", "comments": [ { "body": "I agree. Related to https://github.com/elastic/elasticsearch/issues/12016\n", "created_at": "2016-01-20T14:21:14Z" }, { "body": "I bumped into this too. Is there a workaround that I can apply until this issue has been solved?\n", "created_at": "2016-01-30T10:49:25Z" }, { "body": "Is this fixed now?\nI just pulled the code from master and the second query is returning zero results (I ensured that there is no field starting with 'ms').\n", "created_at": "2016-03-04T22:39:42Z" }, { "body": "@clintongormley Confirmed this is **not** reproducible on latest master with the following:\n\n```\nPOST test-index/test-type/test-doc1\n{\n \"test-key\":\"test-value\"\n}\n\nPOST test-index/_search\n{\n \"query\": {\n \"multi_match\": {\n \"query\": \"test-query\",\n \"fields\": [\n \"doesnt-match*\"\n ]\n }\n }\n}\nresults in 0 hits\n```\n\nHowever the same query fails on 2.3.2. Which release will the changes from #12016 land in?\n", "created_at": "2016-07-20T03:04:20Z" }, { "body": "This is fixed in master thanks to a massive refactoring of how searches are parsed - this obviously can't be backported. If somebody wants to contribute a quick fix for 2.4, then please submit a PR.\n", "created_at": "2016-07-21T10:35:29Z" }, { "body": "One workaround this that I've found is to add a dummy field to the `fields` specification, like `\"fields\": [\"dummydummydummy\", \"ms*\"]`, which will trigger the behavior on missing fields without causing the wildcard misfiring. Not sure if this makes the query significantly more expensive.\n", "created_at": "2016-11-04T15:43:37Z" }, { "body": "This issue seems to be fixed in 5.0.0. As far as I am concerned it can be closed.\n", "created_at": "2016-11-07T20:18:32Z" }, { "body": "Fixed in 5.0, closing\n", "created_at": "2016-11-08T09:08:39Z" } ], "number": 16098, "title": "Inconsistency in multi_match query" }
{ "body": "Fixed QueryParsingException in multi match query when a wildcard expression results in no fields.\nThe query will now return 0 hits (null query) instead of throwing an exception. This matches the behavior if a nonexistent field is specified.\nThese changes were backported from latest master (mostly from #13405).\n\nAll test pass when I ran `mvn clean verify`\n\nCloses #16098 \n", "number": 19564, "review_comments": [], "title": "Fixed QueryParsingException in multi match query" }
{ "commits": [ { "message": "Fixed QueryParsingException in multi match query when a wildcard expression results in no fields.\nThe query will now return 0 hits (null query) instead of throwing an exception. This matches the behavior if a nonexistent field is specified.\nThese changes were backported from latest master (mostly from #13405)." } ], "files": [ { "diff": "@@ -27,12 +27,14 @@\n import org.elasticsearch.common.regex.Regex;\n import org.elasticsearch.common.unit.Fuzziness;\n import org.elasticsearch.common.xcontent.XContentParser;\n+import org.elasticsearch.index.mapper.MapperService;\n import org.elasticsearch.index.query.support.QueryParsers;\n import org.elasticsearch.index.search.MatchQuery;\n import org.elasticsearch.index.search.MultiMatchQuery;\n \n import java.io.IOException;\n import java.util.Map;\n+import java.util.TreeMap;\n \n /**\n * Same as {@link MatchQueryParser} but has support for multiple fields.\n@@ -73,10 +75,10 @@ public Query parse(QueryParseContext parseContext) throws IOException, QueryPars\n } else if (\"fields\".equals(currentFieldName)) {\n if (token == XContentParser.Token.START_ARRAY) {\n while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {\n- extractFieldAndBoost(parseContext, parser, fieldNameWithBoosts);\n+ parseFieldAndBoost(parser, fieldNameWithBoosts);\n }\n } else if (token.isValue()) {\n- extractFieldAndBoost(parseContext, parser, fieldNameWithBoosts);\n+ parseFieldAndBoost(parser, fieldNameWithBoosts);\n } else {\n throw new QueryParsingException(parseContext, \"[\" + NAME + \"] query does not support [\" + currentFieldName + \"]\");\n }\n@@ -160,7 +162,10 @@ public Query parse(QueryParseContext parseContext) throws IOException, QueryPars\n }\n }\n }\n- Query query = multiMatchQuery.parse(type, fieldNameWithBoosts, value, minimumShouldMatch);\n+\n+ Map<String, Float> newFieldsBoosts = handleFieldsMatchPattern(parseContext.mapperService(), fieldNameWithBoosts);\n+\n+ Query query = multiMatchQuery.parse(type, newFieldsBoosts, value, minimumShouldMatch);\n if (query == null) {\n return null;\n }\n@@ -172,7 +177,23 @@ public Query parse(QueryParseContext parseContext) throws IOException, QueryPars\n return query;\n }\n \n- private void extractFieldAndBoost(QueryParseContext parseContext, XContentParser parser, Map<String, Float> fieldNameWithBoosts) throws IOException {\n+ private static Map<String, Float> handleFieldsMatchPattern(MapperService mapperService, Map<String, Float> fieldsBoosts) {\n+ Map<String, Float> newFieldsBoosts = new TreeMap<>();\n+ for (Map.Entry<String, Float> fieldBoost : fieldsBoosts.entrySet()) {\n+ String fField = fieldBoost.getKey();\n+ Float fBoost = fieldBoost.getValue();\n+ if (Regex.isSimpleMatchPattern(fField)) {\n+ for (String field : mapperService.simpleMatchToIndexNames(fField)) {\n+ newFieldsBoosts.put(field, fBoost);\n+ }\n+ } else {\n+ newFieldsBoosts.put(fField, fBoost);\n+ }\n+ }\n+ return newFieldsBoosts;\n+ }\n+\n+ private static void parseFieldAndBoost(XContentParser parser, Map<String, Float> fieldsBoosts) throws IOException {\n String fField = null;\n Float fBoost = null;\n char[] fieldText = parser.textCharacters();\n@@ -188,13 +209,6 @@ private void extractFieldAndBoost(QueryParseContext parseContext, XContentParser\n if (fField == null) {\n fField = parser.text();\n }\n-\n- if (Regex.isSimpleMatchPattern(fField)) {\n- for (String field : parseContext.mapperService().simpleMatchToIndexNames(fField)) {\n- fieldNameWithBoosts.put(field, fBoost);\n- }\n- } else {\n- 
fieldNameWithBoosts.put(fField, fBoost);\n- }\n+ fieldsBoosts.put(fField, fBoost);\n }\n }", "filename": "core/src/main/java/org/elasticsearch/index/query/MultiMatchQueryParser.java", "status": "modified" }, { "diff": "@@ -221,7 +221,7 @@ public void testSingleField() throws NoSuchFieldException, IllegalAccessExceptio\n assertNoFailures(searchResponse);\n assertFirstHit(searchResponse, hasId(\"theone\"));\n \n- String[] fields = {\"full_name\", \"first_name\", \"last_name\", \"last_name_phrase\", \"first_name_phrase\", \"category_phrase\", \"category\"};\n+ String[] fields = {\"full_name\", \"first_name\", \"last_name\", \"last_name_phrase\", \"first_name_phrase\", \"category_phrase\", \"category\", \"missing_field\", \"missing_fields*\"};\n \n String[] query = {\"marvel\",\"hero\", \"captain\", \"america\", \"15\", \"17\", \"1\", \"5\", \"ultimate\", \"Man\",\n \"marvel\", \"wolferine\", \"ninja\"};", "filename": "core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java", "status": "modified" } ] }
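The fix above expands wildcard field patterns through `MapperService#simpleMatchToIndexNames` before the query is built, so a pattern that matches no fields yields an empty field map and a null (zero-hit) query rather than an exception. The standalone sketch below, with no Elasticsearch dependency and only prefix patterns handled for brevity, illustrates that expansion step:

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

public class FieldExpansion {

    /**
     * Expand requested field patterns against the fields known to the mapping.
     * Only prefix patterns ("ms*") are handled here for brevity; the real code
     * delegates to MapperService#simpleMatchToIndexNames. A pattern that
     * matches nothing simply contributes no entries.
     */
    static Map<String, Float> expand(Map<String, Float> requested, Set<String> mappedFields) {
        Map<String, Float> resolved = new TreeMap<>();
        for (Map.Entry<String, Float> entry : requested.entrySet()) {
            String field = entry.getKey();
            if (field.endsWith("*")) {
                String prefix = field.substring(0, field.length() - 1);
                for (String mapped : mappedFields) {
                    if (mapped.startsWith(prefix)) {
                        resolved.put(mapped, entry.getValue());
                    }
                }
            } else {
                resolved.put(field, entry.getValue());
            }
        }
        return resolved; // empty map -> the parser returns a null query, i.e. zero hits
    }

    public static void main(String[] args) {
        Set<String> mapping = new LinkedHashSet<>(Arrays.asList("subject", "message", "ms_metadata"));
        System.out.println(expand(Collections.singletonMap("ms*", 1.0f), mapping));  // {ms_metadata=1.0}
        System.out.println(expand(Collections.singletonMap("foo*", 1.0f), mapping)); // {}
    }
}
```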
{ "body": "I've been trying to make sense of Elasticssearch histogram aggregations the last couple of days. And I've found that they don't work as expected, or even advertised.\n\nLets say i want to aggregate like so:\n\n```\n\"aggregations\": {\n \"sea_water_temperature\": {\n \"histogram\": {\n \"field\": \"sea_water_temperature\",\n \"interval\": 3\n }\n }\n}\n```\n\nResponse buckets looks fine at first glance, but when trying to query for documents within the bounds of a bucket I don't get the same document count as the bucket suggested. E.g.\n\n```\n\"filter\": {\n \"range\": {\n \"sea_water_temperature\": {\n \"lt\": 0,\n \"gte\": -3\n }\n }\n}\n```\n\nThis could give x results while the bucket \"-3\" had a doc_count of y. This seems to only be an issue for negative bucket keys.\n\nIn the docs for histogram it states that the bucket key for a given value is:\n\n```\nrem = value % interval\nif (rem < 0) {\n rem += interval\n}\nbucket_key = value - rem\n```\n\nHowever I tried a term aggregation with that as a value script:\n\n```\n\"aggregations\": {\n \"sea_water_temperature\": {\n \"terms\": {\n \"field\": \"sea_water_temperature\",\n \"script\": \"rem = _value % interval; rem = rem < 0 ? rem + interval : rem; _value - rem\",\n \"params\": {\n \"interval\": 3\n }\n }\n }\n}\n```\n\nThat gives me the same kind of bucketing as histogram does but now my filter queries actually match the doc_counts of the buckets(!). Why isn't histogram working as described? or am I missing something?\n", "comments": [ { "body": "I can replicate this:\n\n```\nDELETE /_all \n\nPOST /t/t/_bulk\n{ \"create\": {}}\n{ \"t\": -3.9}\n{ \"create\": {}}\n{ \"t\": -2.6}\n{ \"create\": {}}\n{ \"t\": -1.4}\n{ \"create\": {}}\n{ \"t\": -0.2}\n{ \"create\": {}}\n{ \"t\": 0.2}\n{ \"create\": {}}\n{ \"t\": 2.4}\n{ \"create\": {}}\n{ \"t\": 2.8}\n{ \"create\": {}}\n{ \"t\": 3.9}\n\nGET /_search?search_type=count\n{\n \"aggs\": {\n \"NAME\": {\n \"histogram\": {\n \"field\": \"t\",\n \"interval\": 3\n }\n }\n }\n}\n```\n\nReturns:\n\n```\n\"aggregations\": {\n \"NAME\": {\n \"buckets\": [\n {\n \"key\": -3,\n \"doc_count\": 3\n },\n {\n \"key\": 0,\n \"doc_count\": 4\n },\n {\n \"key\": 3,\n \"doc_count\": 1\n }\n ]\n }\n}\n```\n", "created_at": "2014-10-16T14:43:05Z" }, { "body": "Can I contribute in any way?\n", "created_at": "2014-10-20T09:47:55Z" }, { "body": "@baelter The problem here is that the histogram casts floats to integers, which is why some negative numbers are being added to the wrong bucket. @jpountz has a solution here, he just needs to implement it.\n", "created_at": "2014-10-20T10:11:59Z" }, { "body": "It is a general limitation of histograms today that they do neither work on decimal values (because of negative values, as you noticed) nor on decimal intervals (intervals are required to be an integer). I agree that it would be nice to fix both issues.\n", "created_at": "2014-10-20T10:51:43Z" }, { "body": "It would, in the mean time, a term aggregation with histogram key function as a script will do as a temp fix.\n", "created_at": "2014-10-20T11:56:04Z" }, { "body": "For reference here is the related (though not duplicate) issue https://github.com/elasticsearch/elasticsearch/issues/4847\n", "created_at": "2014-10-20T11:56:39Z" }, { "body": "I would strongly recommend mentioning this in the docs until it is fixed. It's a subtly broken behavior and it would prevent more users losing time (or their minds! 
:)\n", "created_at": "2015-08-01T17:21:48Z" }, { "body": "@sylvinus agreed - feel like sending a docs PR?\n", "created_at": "2015-08-05T11:28:54Z" } ], "number": 8082, "title": "Histogram bucketing negative values incorrectly" }
{ "body": "Currently both aggregations really share the same implementation. This commit\nsplits the implementations so that regular histograms can support decimal\nintervals/offsets and compute correct buckets for negative decimal values.\n\nHowever the response API is still the same. So for intance both regular\nhistograms and date histograms will produce an\n`org.elasticsearch.search.aggregations.bucket.histogram.Histogram`\naggregation.\n\nThe optimization to compute an identifier of the rounded value and the\nrounded value itself has been removed since it was only used by regular\nhistograms, which now do the rounding themselves instead of relying on the\nRounding abstraction.\n\nCloses #8082\nCloses #4847\n", "number": 19551, "review_comments": [ { "body": "I think it is a bit strange to throw an AssertionError when assertions might not be enabled? Maybe just throw and IllegalArgumentException or a RuntimeException or ElasticsearchException or something?\n", "created_at": "2016-07-25T20:55:05Z" }, { "body": "Stray copy and paste?\n", "created_at": "2016-07-26T14:48:13Z" }, { "body": "`readOptionalWriteable`? I love making these methods shorter so they are easier to scan visually.\n", "created_at": "2016-07-26T14:49:35Z" }, { "body": "I think `InternalDateHistogram.NAME` would be a bit better. Now that type isn't used for serialization I'd like to remove it entirely one day.\n", "created_at": "2016-07-26T14:50:55Z" }, { "body": "Man this feels like a mess compared to ObjectParser. We can't do anything about it in the middle of this PR though. Just makes me sad.\n", "created_at": "2016-07-26T14:59:47Z" }, { "body": "Can you throw something else? It just makes me uncomfortable to throw AssertionError.\n", "created_at": "2016-07-26T15:01:34Z" }, { "body": "Why remove it? I was adding them because I thought it was nice to mark the constructors for anyone unfamiliar with Elasticsearch. It'd help them get their bearings.\n", "created_at": "2016-07-26T15:02:57Z" }, { "body": "`readOptionalWriteable` again?\n", "created_at": "2016-07-26T15:07:44Z" }, { "body": "`readOtionalWriteable` again?\n", "created_at": "2016-07-26T15:08:11Z" }, { "body": "Er, well, it doesn't work like that. Ignore.\n", "created_at": "2016-07-26T15:08:23Z" }, { "body": "Nice.\n", "created_at": "2016-07-26T15:14:20Z" }, { "body": "I think you can drop the `(long)`s because these are `double`s now.\n", "created_at": "2016-07-26T15:26:15Z" }, { "body": "bad end bracket.\n", "created_at": "2016-07-26T15:28:49Z" }, { "body": "We seem to be using this exception in a number of places already. I like that this is an error and not an exception as it means there is an error in the code rather than on the user end, and should not be caught?\n", "created_at": "2016-07-27T06:51:01Z" }, { "body": "indeed :)\n", "created_at": "2016-07-27T06:53:03Z" }, { "body": "agreed, it was not intentional and was probably lost while moving code\n", "created_at": "2016-07-27T06:58:06Z" }, { "body": "agreed, I wanted to avoid creating potential issues by changing serialization logic (this code was just moved) but I agree we should move to ObjectParser\n", "created_at": "2016-07-27T06:58:52Z" }, { "body": "Feel free to tell me I'm totally wrong - but do we feel confident enough that a mistake here (in code) is a good enough reason to cause the [node to restart](https://github.com/elastic/elasticsearch/pull/19272) ? 
I'm worried about endless restarts, where a single mistake on a specific API can cause problems on an entire node.\n", "created_at": "2016-07-27T07:00:15Z" }, { "body": "I wanted to denote the fact that the bound is half open, would you do it otherwise?\n", "created_at": "2016-07-27T07:02:46Z" }, { "body": "This is a good point, I am not sure. On the one hand, this is indeed a specific API and restarting is unlikely to fix anything, but on the other hand this is the same for stack overflows are memory leaks?\n", "created_at": "2016-07-27T07:16:11Z" }, { "body": "> for stack overflows are memory leaks?\n\nI'm not sure what you mean? can you unpack those examples?\n", "created_at": "2016-07-27T10:49:13Z" }, { "body": "ok, but since its abstract can we still document what its doing? Does it really require the insane generic typing?\n", "created_at": "2016-07-27T10:50:56Z" }, { "body": "this new file is missing some kind of explanation about what it does...\n", "created_at": "2016-07-27T10:52:32Z" }, { "body": "Can we fix this broken comment? Maybe we need something to fail the build, for files that contain this:\n\n```\n/**\n *\n */\n```\n\nWhat causes this? Is it Intellij?\n", "created_at": "2016-07-27T10:53:56Z" }, { "body": "This is a big antipattern: here we have two methods, both named interval(), just with different signatures. One is actually a getter, whereas the other changes state!\n\nBut you have no idea reading source code, what is going on, e.g. its just a method and you have to follow the source code to figure out what is happening. \n\n Please change to getInterval() and setInterval() [there are other instances of these in these files, obviously those should be fixed, too]\n", "created_at": "2016-07-27T10:56:37Z" }, { "body": "can we doc the new methods here too, since they are on an interface, they represent a contract.\n", "created_at": "2016-07-27T10:58:37Z" }, { "body": "I think if you throw a subclass of RuntimeException here it'll get bubbled back to the user and we'll get a bug report if it shows up. AssertionError's problem is that catching it and forwarding it around properly gets us dangerously close to catching OOMs. We probably could make an exception for catching AssertionError but I'm not sure it is worth it when we can just throw a RuntimeException of some sort instead.\n\nAlso it just feels weird to throw an assertion error from outside of assertions! Its icky 😨 \n", "created_at": "2016-07-27T12:39:44Z" }, { "body": "Yeah. It can and should wait.\n", "created_at": "2016-07-27T12:40:07Z" }, { "body": "I've never seen the `[0, interval[` notation before! I had no idea it was in an [ISO standard](https://en.wikipedia.org/wiki/ISO_31-11)! I used a wikipedia link because the standard is paywalled I think... That standard was superceded by [ISO 80000-2](http://www.ise.ncsu.edu/jwilson/files/mathsigns.pdf). That link is to a scan.... Anyway, it says to use the `[0, interval)` notation but that the `[0, interval[` notation \"is also used\".\n\nNow that I've done the 20 minutes of research I think you can use whatever you want. I learned the `[0, interval)` notation in school and thought that the backwards `[` on the end was a typo. Some folks will think that too but they too will learn something new that day!\n", "created_at": "2016-07-27T12:55:32Z" }, { "body": "For the record, the JVM code itself seems to be using AssertionError for impossible branches.\n\n> > for stack overflows are memory leaks?\n> \n> I'm not sure what you mean? 
can you unpack those examples?\n\nI mean that a specific API could trigger eg. a StackOverflowError, which will cause the node to shut down even though it does not prevent normal operations of the node otherwise.\n", "created_at": "2016-08-01T08:01:11Z" } ], "title": "Split regular histograms from date histograms." }
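The PR description notes that the `roundKey`/`valueForKey` pair is collapsed into a single `round` method, with regular histograms doing their own bucketing. As a condensed, hypothetical illustration of that contract (not the actual `Rounding` implementation), an offset-aware interval rounding can be written with `Math.floorDiv`, which keeps negative values on the correct side of the bucket boundary where truncating division would not:

```java
/**
 * Simplified, hypothetical take on the rounding contract after this PR:
 * a single round(value) call per value, plus nextRoundingValue for fixed
 * intervals. Not the real Rounding class.
 */
class IntervalRounding {

    private final long interval;
    private final long offset;

    IntervalRounding(long interval, long offset) {
        this.interval = interval;
        this.offset = offset;
    }

    long round(long value) {
        // floorDiv rounds toward negative infinity, so -4 with interval 3 maps to -6, not -3
        return Math.floorDiv(value - offset, interval) * interval + offset;
    }

    long nextRoundingValue(long rounded) {
        return rounded + interval;
    }

    public static void main(String[] args) {
        IntervalRounding rounding = new IntervalRounding(3, 0);
        System.out.println(rounding.round(-4)); // -6
        System.out.println(rounding.round(4));  // 3
    }
}
```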
{ "commits": [ { "message": "Split regular histograms from date histograms. #19551\n\nCurrently both aggregations really share the same implementation. This commit\nsplits the implementations so that regular histograms can support decimal\nintervals/offsets and compute correct buckets for negative decimal values.\n\nHowever the response API is still the same. So for intance both regular\nhistograms and date histograms will produce an\n`org.elasticsearch.search.aggregations.bucket.histogram.Histogram`\naggregation.\n\nThe optimization to compute an identifier of the rounded value and the\nrounded value itself has been removed since it was only used by regular\nhistograms, which now do the rounding themselves instead of relying on the\nRounding abstraction.\n\nCloses #8082\nCloses #4847" } ], "files": [ { "diff": "@@ -35,24 +35,9 @@ public abstract class Rounding implements Streamable {\n public abstract byte id();\n \n /**\n- * Given a value, compute a key that uniquely identifies the rounded value although it is not necessarily equal to the rounding value itself.\n+ * Rounds the given value.\n */\n- public abstract long roundKey(long value);\n-\n- /**\n- * Compute the rounded value given the key that identifies it.\n- */\n- public abstract long valueForKey(long key);\n-\n- /**\n- * Rounds the given value, equivalent to calling <code>roundValue(roundKey(value))</code>.\n- *\n- * @param value The value to round.\n- * @return The rounded value.\n- */\n- public final long round(long value) {\n- return valueForKey(roundKey(value));\n- }\n+ public abstract long round(long value);\n \n /**\n * Given the rounded value (which was potentially generated by {@link #round(long)}, returns the next rounding value. For example, with\n@@ -112,13 +97,8 @@ public static long roundValue(long key, long interval) {\n }\n \n @Override\n- public long roundKey(long value) {\n- return roundKey(value, interval);\n- }\n-\n- @Override\n- public long valueForKey(long key) {\n- return key * interval;\n+ public long round(long value) {\n+ return roundKey(value, interval) * interval;\n }\n \n @Override\n@@ -179,13 +159,8 @@ public byte id() {\n }\n \n @Override\n- public long roundKey(long utcMillis) {\n- return rounding.roundKey((long) (factor * utcMillis));\n- }\n-\n- @Override\n- public long valueForKey(long key) {\n- return rounding.valueForKey(key);\n+ public long round(long utcMillis) {\n+ return rounding.round((long) (factor * utcMillis));\n }\n \n @Override\n@@ -248,13 +223,8 @@ public byte id() {\n }\n \n @Override\n- public long roundKey(long value) {\n- return rounding.roundKey(value - offset);\n- }\n-\n- @Override\n- public long valueForKey(long key) {\n- return offset + rounding.valueForKey(key);\n+ public long round(long value) {\n+ return rounding.round(value - offset) + offset;\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/common/rounding/Rounding.java", "status": "modified" }, { "diff": "@@ -31,6 +31,9 @@\n import java.util.Objects;\n \n /**\n+ * A rounding strategy for dates. 
It is typically used to group together dates\n+ * that are part of the same hour/day/month, taking into account time zones and\n+ * daylight saving times.\n */\n public abstract class TimeZoneRounding extends Rounding {\n public static final ParseField INTERVAL_FIELD = new ParseField(\"interval\");\n@@ -125,7 +128,7 @@ public byte id() {\n }\n \n @Override\n- public long roundKey(long utcMillis) {\n+ public long round(long utcMillis) {\n long rounded = field.roundFloor(utcMillis);\n if (timeZone.isFixed() == false && timeZone.getOffset(utcMillis) != timeZone.getOffset(rounded)) {\n // in this case, we crossed a time zone transition. In some edge cases this will\n@@ -138,20 +141,14 @@ public long roundKey(long utcMillis) {\n return rounded;\n }\n \n- @Override\n- public long valueForKey(long time) {\n- assert roundKey(time) == time;\n- return time;\n- }\n-\n @Override\n public long nextRoundingValue(long utcMillis) {\n- long floor = roundKey(utcMillis);\n+ long floor = round(utcMillis);\n // add one unit and round to get to next rounded value\n- long next = roundKey(field.add(floor, 1));\n+ long next = round(field.add(floor, 1));\n if (next == floor) {\n // in rare case we need to add more than one unit\n- next = roundKey(field.add(floor, 2));\n+ next = round(field.add(floor, 2));\n }\n return next;\n }\n@@ -216,7 +213,7 @@ public byte id() {\n }\n \n @Override\n- public long roundKey(long utcMillis) {\n+ public long round(long utcMillis) {\n long timeLocal = timeZone.convertUTCToLocal(utcMillis);\n long rounded = Rounding.Interval.roundValue(Rounding.Interval.roundKey(timeLocal, interval), interval);\n long roundedUTC;\n@@ -225,7 +222,7 @@ public long roundKey(long utcMillis) {\n // check if we crossed DST transition, in this case we want the last rounded value before the transition\n long transition = timeZone.previousTransition(utcMillis);\n if (transition != utcMillis && transition > roundedUTC) {\n- roundedUTC = roundKey(transition - 1);\n+ roundedUTC = round(transition - 1);\n }\n } else {\n /*\n@@ -276,12 +273,6 @@ private boolean isInDSTGap(long instantLocal) {\n return false;\n }\n \n- @Override\n- public long valueForKey(long time) {\n- assert roundKey(time) == time;\n- return time;\n- }\n-\n @Override\n public long nextRoundingValue(long time) {\n long timeLocal = time;", "filename": "core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java", "status": "modified" }, { "diff": "@@ -116,6 +116,7 @@\n import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramParser;\n import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;\n import org.elasticsearch.search.aggregations.bucket.histogram.HistogramParser;\n+import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram;\n import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;\n import org.elasticsearch.search.aggregations.bucket.missing.InternalMissing;\n import org.elasticsearch.search.aggregations.bucket.missing.MissingAggregationBuilder;\n@@ -546,7 +547,7 @@ private void registerBuiltinAggregations() {\n registerAggregation(new AggregationSpec(HistogramAggregationBuilder::new, new HistogramParser(),\n HistogramAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalHistogram::new));\n registerAggregation(new AggregationSpec(DateHistogramAggregationBuilder::new, new DateHistogramParser(),\n- DateHistogramAggregationBuilder.AGGREGATION_NAME_FIELD));\n+ 
DateHistogramAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalDateHistogram::new));\n registerAggregation(new AggregationSpec(GeoDistanceAggregationBuilder::new, new GeoDistanceParser(),\n GeoDistanceAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalGeoDistance::new));\n registerAggregation(new AggregationSpec(GeoGridAggregationBuilder::new, new GeoHashGridParser(),", "filename": "core/src/main/java/org/elasticsearch/search/SearchModule.java", "status": "modified" }, { "diff": "@@ -27,40 +27,91 @@\n import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;\n import org.elasticsearch.search.aggregations.AggregatorFactory;\n import org.elasticsearch.search.aggregations.support.AggregationContext;\n+import org.elasticsearch.search.aggregations.support.ValueType;\n+import org.elasticsearch.search.aggregations.support.ValuesSource;\n+import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;\n import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;\n+import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;\n import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;\n+import org.elasticsearch.search.aggregations.support.ValuesSourceType;\n \n import java.io.IOException;\n import java.util.Objects;\n \n-public class DateHistogramAggregationBuilder extends AbstractHistogramBuilder<DateHistogramAggregationBuilder> {\n-\n+/**\n+ * A builder for histograms on date fields.\n+ */\n+public class DateHistogramAggregationBuilder\n+ extends ValuesSourceAggregationBuilder<ValuesSource.Numeric, DateHistogramAggregationBuilder> {\n public static final String NAME = InternalDateHistogram.TYPE.name();\n public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME);\n \n+ private long interval;\n private DateHistogramInterval dateHistogramInterval;\n+ private long offset = 0;\n+ private ExtendedBounds extendedBounds;\n+ private InternalOrder order = (InternalOrder) Histogram.Order.KEY_ASC;\n+ private boolean keyed = false;\n+ private long minDocCount = 0;\n \n+ /** Create a new builder with the given name. */\n public DateHistogramAggregationBuilder(String name) {\n- super(name, InternalDateHistogram.HISTOGRAM_FACTORY);\n+ super(name, InternalDateHistogram.TYPE, ValuesSourceType.NUMERIC, ValueType.DATE);\n }\n \n- /**\n- * Read from a stream.\n- */\n+ /** Read from a stream, for internal use only. 
*/\n public DateHistogramAggregationBuilder(StreamInput in) throws IOException {\n- super(in, InternalDateHistogram.HISTOGRAM_FACTORY);\n+ super(in, InternalDateHistogram.TYPE, ValuesSourceType.NUMERIC, ValueType.DATE);\n+ if (in.readBoolean()) {\n+ order = InternalOrder.Streams.readOrder(in);\n+ }\n+ keyed = in.readBoolean();\n+ minDocCount = in.readVLong();\n+ interval = in.readLong();\n dateHistogramInterval = in.readOptionalWriteable(DateHistogramInterval::new);\n+ offset = in.readLong();\n+ extendedBounds = in.readOptionalWriteable(ExtendedBounds::new);\n }\n \n @Override\n protected void innerWriteTo(StreamOutput out) throws IOException {\n- super.innerWriteTo(out);\n+ boolean hasOrder = order != null;\n+ out.writeBoolean(hasOrder);\n+ if (hasOrder) {\n+ InternalOrder.Streams.writeOrder(order, out);\n+ }\n+ out.writeBoolean(keyed);\n+ out.writeVLong(minDocCount);\n+ out.writeLong(interval);\n out.writeOptionalWriteable(dateHistogramInterval);\n+ out.writeLong(offset);\n+ out.writeOptionalWriteable(extendedBounds);\n+ }\n+\n+ /** Get the current interval in milliseconds that is set on this builder. */\n+ public double interval() {\n+ return interval;\n+ }\n+\n+ /** Set the interval on this builder, and return the builder so that calls can be chained.\n+ * If both {@link #interval()} and {@link #dateHistogramInterval()} are set, then the\n+ * {@link #dateHistogramInterval()} wins. */\n+ public DateHistogramAggregationBuilder interval(long interval) {\n+ if (interval < 1) {\n+ throw new IllegalArgumentException(\"[interval] must be 1 or greater for histogram aggregation [\" + name + \"]\");\n+ }\n+ this.interval = interval;\n+ return this;\n+ }\n+\n+ /** Get the current date interval that is set on this builder. */\n+ public DateHistogramInterval dateHistogramInterval() {\n+ return dateHistogramInterval;\n }\n \n- /**\n- * Set the interval.\n- */\n+ /** Set the interval on this builder, and return the builder so that calls can be chained.\n+ * If both {@link #interval()} and {@link #dateHistogramInterval()} are set, then the\n+ * {@link #dateHistogramInterval()} wins. */\n public DateHistogramAggregationBuilder dateHistogramInterval(DateHistogramInterval dateHistogramInterval) {\n if (dateHistogramInterval == null) {\n throw new IllegalArgumentException(\"[dateHistogramInterval] must not be null: [\" + name + \"]\");\n@@ -69,14 +120,28 @@ public DateHistogramAggregationBuilder dateHistogramInterval(DateHistogramInterv\n return this;\n }\n \n+ /** Get the offset to use when rounding, which is a number of milliseconds. */\n+ public double offset() {\n+ return offset;\n+ }\n+\n+ /** Set the offset on this builder, which is a number of milliseconds, and\n+ * return the builder so that calls can be chained. */\n+ public DateHistogramAggregationBuilder offset(long offset) {\n+ this.offset = offset;\n+ return this;\n+ }\n+\n+ /** Set the offset on this builder, as a time value, and\n+ * return the builder so that calls can be chained. 
*/\n public DateHistogramAggregationBuilder offset(String offset) {\n if (offset == null) {\n throw new IllegalArgumentException(\"[offset] must not be null: [\" + name + \"]\");\n }\n return offset(parseStringOffset(offset));\n }\n \n- protected static long parseStringOffset(String offset) {\n+ static long parseStringOffset(String offset) {\n if (offset.charAt(0) == '-') {\n return -TimeValue\n .parseTimeValue(offset.substring(1), null, DateHistogramAggregationBuilder.class.getSimpleName() + \".parseOffset\")\n@@ -88,40 +153,118 @@ protected static long parseStringOffset(String offset) {\n .millis();\n }\n \n- public DateHistogramInterval dateHistogramInterval() {\n- return dateHistogramInterval;\n+ /** Return extended bounds for this histogram, or {@code null} if none are set. */\n+ public ExtendedBounds extendedBounds() {\n+ return extendedBounds;\n }\n \n- @Override\n- protected DateHistogramAggregatorFactory innerBuild(AggregationContext context, ValuesSourceConfig<Numeric> config,\n- AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException {\n- return new DateHistogramAggregatorFactory(name, type, config, interval, dateHistogramInterval, offset, order, keyed, minDocCount,\n- extendedBounds, context, parent, subFactoriesBuilder, metaData);\n+ /** Set extended bounds on this histogram, so that buckets would also be\n+ * generated on intervals that did not match any documents. */\n+ public DateHistogramAggregationBuilder extendedBounds(ExtendedBounds extendedBounds) {\n+ if (extendedBounds == null) {\n+ throw new IllegalArgumentException(\"[extendedBounds] must not be null: [\" + name + \"]\");\n+ }\n+ this.extendedBounds = extendedBounds;\n+ return this;\n }\n \n- @Override\n- public String getWriteableName() {\n- return NAME;\n+ /** Return the order to use to sort buckets of this histogram. */\n+ public Histogram.Order order() {\n+ return order;\n+ }\n+\n+ /** Set a new order on this builder and return the builder so that calls\n+ * can be chained. */\n+ public DateHistogramAggregationBuilder order(Histogram.Order order) {\n+ if (order == null) {\n+ throw new IllegalArgumentException(\"[order] must not be null: [\" + name + \"]\");\n+ }\n+ this.order = (InternalOrder) order;\n+ return this;\n+ }\n+\n+ /** Return whether buckets should be returned as a hash. In case\n+ * {@code keyed} is false, buckets will be returned as an array. */\n+ public boolean keyed() {\n+ return keyed;\n+ }\n+\n+ /** Set whether to return buckets as a hash or as an array, and return the\n+ * builder so that calls can be chained. */\n+ public DateHistogramAggregationBuilder keyed(boolean keyed) {\n+ this.keyed = keyed;\n+ return this;\n+ }\n+\n+ /** Return the minimum count of documents that buckets need to have in order\n+ * to be included in the response. */\n+ public long minDocCount() {\n+ return minDocCount;\n+ }\n+\n+ /** Set the minimum count of matching documents that buckets need to have\n+ * and return this builder so that calls can be chained. */\n+ public DateHistogramAggregationBuilder minDocCount(long minDocCount) {\n+ if (minDocCount < 0) {\n+ throw new IllegalArgumentException(\n+ \"[minDocCount] must be greater than or equal to 0. 
Found [\" + minDocCount + \"] in [\" + name + \"]\");\n+ }\n+ this.minDocCount = minDocCount;\n+ return this;\n }\n \n @Override\n- protected XContentBuilder doXContentInterval(XContentBuilder builder, Params params) throws IOException {\n+ protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {\n+\n if (dateHistogramInterval == null) {\n- super.doXContentInterval(builder, params);\n+ builder.field(Histogram.INTERVAL_FIELD.getPreferredName(), interval);\n } else {\n- builder.value(dateHistogramInterval.toString());\n+ builder.field(Histogram.INTERVAL_FIELD.getPreferredName(), dateHistogramInterval.toString());\n+ }\n+ builder.field(Histogram.OFFSET_FIELD.getPreferredName(), offset);\n+\n+ if (order != null) {\n+ builder.field(Histogram.ORDER_FIELD.getPreferredName());\n+ order.toXContent(builder, params);\n }\n+\n+ builder.field(Histogram.KEYED_FIELD.getPreferredName(), keyed);\n+\n+ builder.field(Histogram.MIN_DOC_COUNT_FIELD.getPreferredName(), minDocCount);\n+\n+ if (extendedBounds != null) {\n+ extendedBounds.toXContent(builder, params);\n+ }\n+\n return builder;\n }\n \n+ @Override\n+ public String getWriteableName() {\n+ return NAME;\n+ }\n+\n+ @Override\n+ protected ValuesSourceAggregatorFactory<Numeric, ?> innerBuild(AggregationContext context, ValuesSourceConfig<Numeric> config,\n+ AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException {\n+ return new DateHistogramAggregatorFactory(name, type, config, interval, dateHistogramInterval, offset, order, keyed, minDocCount,\n+ extendedBounds, context, parent, subFactoriesBuilder, metaData);\n+ }\n+\n @Override\n protected int innerHashCode() {\n- return Objects.hash(super.innerHashCode(), dateHistogramInterval);\n+ return Objects.hash(order, keyed, minDocCount, interval, dateHistogramInterval, minDocCount, extendedBounds);\n }\n \n @Override\n protected boolean innerEquals(Object obj) {\n DateHistogramAggregationBuilder other = (DateHistogramAggregationBuilder) obj;\n- return super.innerEquals(obj) && Objects.equals(dateHistogramInterval, other.dateHistogramInterval);\n+ return Objects.equals(order, other.order)\n+ && Objects.equals(keyed, other.keyed)\n+ && Objects.equals(minDocCount, other.minDocCount)\n+ && Objects.equals(interval, other.interval)\n+ && Objects.equals(dateHistogramInterval, other.dateHistogramInterval)\n+ && Objects.equals(offset, other.offset)\n+ && Objects.equals(extendedBounds, other.extendedBounds);\n }\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -0,0 +1,153 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.elasticsearch.search.aggregations.bucket.histogram;\n+\n+import org.apache.lucene.index.LeafReaderContext;\n+import org.apache.lucene.index.SortedNumericDocValues;\n+import org.apache.lucene.util.CollectionUtil;\n+import org.elasticsearch.common.inject.internal.Nullable;\n+import org.elasticsearch.common.lease.Releasables;\n+import org.elasticsearch.common.rounding.Rounding;\n+import org.elasticsearch.common.rounding.TimeZoneRounding;\n+import org.elasticsearch.common.util.LongHash;\n+import org.elasticsearch.search.DocValueFormat;\n+import org.elasticsearch.search.aggregations.Aggregator;\n+import org.elasticsearch.search.aggregations.AggregatorFactories;\n+import org.elasticsearch.search.aggregations.InternalAggregation;\n+import org.elasticsearch.search.aggregations.LeafBucketCollector;\n+import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;\n+import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;\n+import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;\n+import org.elasticsearch.search.aggregations.support.AggregationContext;\n+import org.elasticsearch.search.aggregations.support.ValuesSource;\n+\n+import java.io.IOException;\n+import java.util.ArrayList;\n+import java.util.Collections;\n+import java.util.List;\n+import java.util.Map;\n+\n+/**\n+ * An aggregator for date values. Every date is rounded down using a configured\n+ * {@link TimeZoneRounding}.\n+ * @see TimeZoneRounding\n+ */\n+class DateHistogramAggregator extends BucketsAggregator {\n+\n+ private final ValuesSource.Numeric valuesSource;\n+ private final DocValueFormat formatter;\n+ private final Rounding rounding;\n+ private final InternalOrder order;\n+ private final boolean keyed;\n+\n+ private final long minDocCount;\n+ private final ExtendedBounds extendedBounds;\n+\n+ private final LongHash bucketOrds;\n+\n+ public DateHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, InternalOrder order, boolean keyed,\n+ long minDocCount, @Nullable ExtendedBounds extendedBounds, @Nullable ValuesSource.Numeric valuesSource,\n+ DocValueFormat formatter, AggregationContext aggregationContext,\n+ Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {\n+\n+ super(name, factories, aggregationContext, parent, pipelineAggregators, metaData);\n+ this.rounding = rounding;\n+ this.order = order;\n+ this.keyed = keyed;\n+ this.minDocCount = minDocCount;\n+ this.extendedBounds = extendedBounds;\n+ this.valuesSource = valuesSource;\n+ this.formatter = formatter;\n+\n+ bucketOrds = new LongHash(1, aggregationContext.bigArrays());\n+ }\n+\n+ @Override\n+ public boolean needsScores() {\n+ return (valuesSource != null && valuesSource.needsScores()) || super.needsScores();\n+ }\n+\n+ @Override\n+ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,\n+ final LeafBucketCollector sub) throws IOException {\n+ if (valuesSource == null) {\n+ return LeafBucketCollector.NO_OP_COLLECTOR;\n+ }\n+ final SortedNumericDocValues values = valuesSource.longValues(ctx);\n+ return new LeafBucketCollectorBase(sub, values) {\n+ @Override\n+ public void collect(int doc, long bucket) throws IOException {\n+ assert bucket == 0;\n+ values.setDocument(doc);\n+ final int valuesCount = values.count();\n+\n+ long previousRounded = Long.MIN_VALUE;\n+ for (int i = 0; i < valuesCount; ++i) {\n+ long 
value = values.valueAt(i);\n+ long rounded = rounding.round(value);\n+ assert rounded >= previousRounded;\n+ if (rounded == previousRounded) {\n+ continue;\n+ }\n+ long bucketOrd = bucketOrds.add(rounded);\n+ if (bucketOrd < 0) { // already seen\n+ bucketOrd = -1 - bucketOrd;\n+ collectExistingBucket(sub, doc, bucketOrd);\n+ } else {\n+ collectBucket(sub, doc, bucketOrd);\n+ }\n+ previousRounded = rounded;\n+ }\n+ }\n+ };\n+ }\n+\n+ @Override\n+ public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {\n+ assert owningBucketOrdinal == 0;\n+ List<InternalDateHistogram.Bucket> buckets = new ArrayList<>((int) bucketOrds.size());\n+ for (long i = 0; i < bucketOrds.size(); i++) {\n+ buckets.add(new InternalDateHistogram.Bucket(bucketOrds.get(i), bucketDocCount(i), keyed, formatter, bucketAggregations(i)));\n+ }\n+\n+ // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order\n+ CollectionUtil.introSort(buckets, InternalOrder.KEY_ASC.comparator());\n+\n+ // value source will be null for unmapped fields\n+ InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0\n+ ? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds)\n+ : null;\n+ return new InternalDateHistogram(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed,\n+ pipelineAggregators(), metaData());\n+ }\n+\n+ @Override\n+ public InternalAggregation buildEmptyAggregation() {\n+ InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0\n+ ? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds)\n+ : null;\n+ return new InternalDateHistogram(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed,\n+ pipelineAggregators(), metaData());\n+ }\n+\n+ @Override\n+ public void doClose() {\n+ Releasables.close(bucketOrds);\n+ }\n+}", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java", "status": "added" }, { "diff": "@@ -23,23 +23,29 @@\n import org.elasticsearch.common.rounding.Rounding;\n import org.elasticsearch.common.rounding.TimeZoneRounding;\n import org.elasticsearch.common.unit.TimeValue;\n+import org.elasticsearch.search.aggregations.Aggregator;\n import org.elasticsearch.search.aggregations.AggregatorFactories;\n import org.elasticsearch.search.aggregations.AggregatorFactory;\n import org.elasticsearch.search.aggregations.InternalAggregation.Type;\n+import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;\n import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;\n \n import java.io.IOException;\n import java.util.HashMap;\n+import java.util.List;\n import java.util.Map;\n \n import static java.util.Collections.unmodifiableMap;\n \n import org.elasticsearch.search.aggregations.support.AggregationContext;\n+import org.elasticsearch.search.aggregations.support.ValuesSource;\n+import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;\n import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;\n \n-public class DateHistogramAggregatorFactory extends AbstractHistogramAggregatorFactory<DateHistogramAggregatorFactory> {\n+public final class DateHistogramAggregatorFactory\n+ extends ValuesSourceAggregatorFactory<ValuesSource.Numeric, DateHistogramAggregatorFactory> {\n+\n public static final Map<String, DateTimeUnit> DATE_FIELD_UNITS;\n- private final 
DateHistogramInterval dateHistogramInterval;\n \n static {\n Map<String, DateTimeUnit> dateFieldUnits = new HashMap<>();\n@@ -62,17 +68,33 @@ public class DateHistogramAggregatorFactory extends AbstractHistogramAggregatorF\n DATE_FIELD_UNITS = unmodifiableMap(dateFieldUnits);\n }\n \n+ private final DateHistogramInterval dateHistogramInterval;\n+ private final long interval;\n+ private final long offset;\n+ private final InternalOrder order;\n+ private final boolean keyed;\n+ private final long minDocCount;\n+ private final ExtendedBounds extendedBounds;\n+\n public DateHistogramAggregatorFactory(String name, Type type, ValuesSourceConfig<Numeric> config, long interval,\n DateHistogramInterval dateHistogramInterval, long offset, InternalOrder order, boolean keyed, long minDocCount,\n ExtendedBounds extendedBounds, AggregationContext context, AggregatorFactory<?> parent,\n AggregatorFactories.Builder subFactoriesBuilder, Map<String, Object> metaData) throws IOException {\n- super(name, type, config, interval, offset, order, keyed, minDocCount, extendedBounds, InternalDateHistogram.HISTOGRAM_FACTORY,\n- context, parent, subFactoriesBuilder, metaData);\n+ super(name, type, config, context, parent, subFactoriesBuilder, metaData);\n+ this.interval = interval;\n this.dateHistogramInterval = dateHistogramInterval;\n+ this.offset = offset;\n+ this.order = order;\n+ this.keyed = keyed;\n+ this.minDocCount = minDocCount;\n+ this.extendedBounds = extendedBounds;\n }\n \n- @Override\n- protected Rounding createRounding() {\n+ public long minDocCount() {\n+ return minDocCount;\n+ }\n+\n+ private Rounding createRounding() {\n TimeZoneRounding.Builder tzRoundingBuilder;\n if (dateHistogramInterval != null) {\n DateTimeUnit dateTimeUnit = DATE_FIELD_UNITS.get(dateHistogramInterval.toString());\n@@ -94,4 +116,35 @@ protected Rounding createRounding() {\n return rounding;\n }\n \n+ @Override\n+ protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket,\n+ List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {\n+ if (collectsFromSingleBucket == false) {\n+ return asMultiBucketAggregator(this, context, parent);\n+ }\n+ return createAggregator(valuesSource, parent, pipelineAggregators, metaData);\n+ }\n+\n+ private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent, List<PipelineAggregator> pipelineAggregators,\n+ Map<String, Object> metaData) throws IOException {\n+ Rounding rounding = createRounding();\n+ // we need to round the bounds given by the user and we have to do it\n+ // for every aggregator we create\n+ // as the rounding is not necessarily an idempotent operation.\n+ // todo we need to think of a better structure to the factory/agtor\n+ // code so we won't need to do that\n+ ExtendedBounds roundedBounds = null;\n+ if (extendedBounds != null) {\n+ // parse any string bounds to longs and round them\n+ roundedBounds = extendedBounds.parseAndValidate(name, context.searchContext(), config.format()).round(rounding);\n+ }\n+ return new DateHistogramAggregator(name, factories, rounding, order, keyed, minDocCount, roundedBounds, valuesSource,\n+ config.format(), context, parent, pipelineAggregators, metaData);\n+ }\n+\n+ @Override\n+ protected Aggregator createUnmapped(Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)\n+ throws IOException {\n+ return createAggregator(null, parent, pipelineAggregators, metaData);\n+ }\n }", 
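A minimal usage sketch of the refactored date histogram builder, assuming a 5.x-era Elasticsearch classpath; the class, method and constant names are taken from the diffs above, while the aggregation name ("sales_per_day") and field name ("timestamp") are invented purely for illustration:

    // Hedged sketch, not part of the PR diff: chains the setters added in DateHistogramAggregationBuilder.
    DateHistogramAggregationBuilder dateHisto = new DateHistogramAggregationBuilder("sales_per_day")
            .field("timestamp")                                // field() is inherited from ValuesSourceAggregationBuilder
            .dateHistogramInterval(DateHistogramInterval.DAY)  // calendar interval; wins over interval(long) if both are set
            .offset("1h")                                      // string offsets are parsed to millis via parseStringOffset
            .order(Histogram.Order.KEY_ASC)
            .minDocCount(1)
            .keyed(false);

For the plain numeric HistogramAggregator shown above, the bucket key of a value is Math.floor((value - offset) / interval) * interval + offset; for example, value 7.0 with interval 5 and offset 1 lands in the bucket keyed 6.0.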
"filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java", "status": "modified" }, { "diff": "@@ -19,26 +19,26 @@\n package org.elasticsearch.search.aggregations.bucket.histogram;\n \n import org.elasticsearch.common.ParseField;\n+import org.elasticsearch.common.ParseFieldMatcher;\n import org.elasticsearch.common.ParsingException;\n import org.elasticsearch.common.rounding.Rounding;\n+import org.elasticsearch.common.xcontent.XContentParser;\n+import org.elasticsearch.common.xcontent.XContentParser.Token;\n+import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;\n import org.elasticsearch.search.aggregations.support.ValueType;\n import org.elasticsearch.search.aggregations.support.ValuesSourceType;\n \n import java.io.IOException;\n import java.util.Map;\n \n /**\n- *\n+ * A parser for date histograms. This translates json into a\n+ * {@link DateHistogramAggregationBuilder} instance.\n */\n-public class DateHistogramParser extends HistogramParser {\n+public class DateHistogramParser extends NumericValuesSourceParser {\n \n public DateHistogramParser() {\n- super(true);\n- }\n-\n- @Override\n- protected Object parseStringInterval(String text) {\n- return new DateHistogramInterval(text);\n+ super(true, true, true);\n }\n \n @Override\n@@ -52,6 +52,8 @@ protected DateHistogramAggregationBuilder createFactory(String aggregationName,\n factory.interval((Long) interval);\n } else if (interval instanceof DateHistogramInterval) {\n factory.dateHistogramInterval((DateHistogramInterval) interval);\n+ } else {\n+ throw new IllegalStateException(\"Unexpected interval class: \" + interval.getClass());\n }\n Long offset = (Long) otherOptions.get(Rounding.OffsetRounding.OFFSET_FIELD);\n if (offset != null) {\n@@ -62,21 +64,85 @@ protected DateHistogramAggregationBuilder createFactory(String aggregationName,\n if (extendedBounds != null) {\n factory.extendedBounds(extendedBounds);\n }\n- Boolean keyed = (Boolean) otherOptions.get(HistogramAggregator.KEYED_FIELD);\n+ Boolean keyed = (Boolean) otherOptions.get(Histogram.KEYED_FIELD);\n if (keyed != null) {\n factory.keyed(keyed);\n }\n- Long minDocCount = (Long) otherOptions.get(HistogramAggregator.MIN_DOC_COUNT_FIELD);\n+ Long minDocCount = (Long) otherOptions.get(Histogram.MIN_DOC_COUNT_FIELD);\n if (minDocCount != null) {\n factory.minDocCount(minDocCount);\n }\n- InternalOrder order = (InternalOrder) otherOptions.get(HistogramAggregator.ORDER_FIELD);\n+ InternalOrder order = (InternalOrder) otherOptions.get(Histogram.ORDER_FIELD);\n if (order != null) {\n factory.order(order);\n }\n return factory;\n }\n \n+ @Override\n+ protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,\n+ ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {\n+ if (token.isValue()) {\n+ if (parseFieldMatcher.match(currentFieldName, Rounding.Interval.INTERVAL_FIELD)) {\n+ if (token == XContentParser.Token.VALUE_STRING) {\n+ otherOptions.put(Rounding.Interval.INTERVAL_FIELD, new DateHistogramInterval(parser.text()));\n+ return true;\n+ } else {\n+ otherOptions.put(Rounding.Interval.INTERVAL_FIELD, parser.longValue());\n+ return true;\n+ }\n+ } else if (parseFieldMatcher.match(currentFieldName, Histogram.MIN_DOC_COUNT_FIELD)) {\n+ otherOptions.put(Histogram.MIN_DOC_COUNT_FIELD, parser.longValue());\n+ return true;\n+ } else if (parseFieldMatcher.match(currentFieldName, 
Histogram.KEYED_FIELD)) {\n+ otherOptions.put(Histogram.KEYED_FIELD, parser.booleanValue());\n+ return true;\n+ } else if (parseFieldMatcher.match(currentFieldName, Rounding.OffsetRounding.OFFSET_FIELD)) {\n+ if (token == XContentParser.Token.VALUE_STRING) {\n+ otherOptions.put(Rounding.OffsetRounding.OFFSET_FIELD,\n+ DateHistogramAggregationBuilder.parseStringOffset(parser.text()));\n+ return true;\n+ } else {\n+ otherOptions.put(Rounding.OffsetRounding.OFFSET_FIELD, parser.longValue());\n+ return true;\n+ }\n+ } else {\n+ return false;\n+ }\n+ } else if (token == XContentParser.Token.START_OBJECT) {\n+ if (parseFieldMatcher.match(currentFieldName, Histogram.ORDER_FIELD)) {\n+ InternalOrder order = null;\n+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ currentFieldName = parser.currentName();\n+ } else if (token == XContentParser.Token.VALUE_STRING) {\n+ String dir = parser.text();\n+ boolean asc = \"asc\".equals(dir);\n+ if (!asc && !\"desc\".equals(dir)) {\n+ throw new ParsingException(parser.getTokenLocation(), \"Unknown order direction in aggregation [\"\n+ + aggregationName + \"]: [\" + dir\n+ + \"]. Should be either [asc] or [desc]\");\n+ }\n+ order = resolveOrder(currentFieldName, asc);\n+ }\n+ }\n+ otherOptions.put(Histogram.ORDER_FIELD, order);\n+ return true;\n+ } else if (parseFieldMatcher.match(currentFieldName, ExtendedBounds.EXTENDED_BOUNDS_FIELD)) {\n+ try {\n+ otherOptions.put(ExtendedBounds.EXTENDED_BOUNDS_FIELD, ExtendedBounds.PARSER.apply(parser, () -> parseFieldMatcher));\n+ } catch (Exception e) {\n+ throw new ParsingException(parser.getTokenLocation(), \"Error parsing [{}]\", e, aggregationName);\n+ }\n+ return true;\n+ } else {\n+ return false;\n+ }\n+ } else {\n+ return false;\n+ }\n+ }\n+\n static InternalOrder resolveOrder(String key, boolean asc) {\n if (\"_key\".equals(key) || \"_time\".equals(key)) {\n return (InternalOrder) (asc ? 
InternalOrder.KEY_ASC : InternalOrder.KEY_DESC);\n@@ -86,9 +152,4 @@ static InternalOrder resolveOrder(String key, boolean asc) {\n }\n return new InternalOrder.Aggregation(key, asc);\n }\n-\n- @Override\n- protected long parseStringOffset(String offset) throws IOException {\n- return DateHistogramAggregationBuilder.parseStringOffset(offset);\n- }\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java", "status": "modified" }, { "diff": "@@ -41,7 +41,7 @@\n import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;\n \n public class ExtendedBounds implements ToXContent, Writeable {\n- static final ParseField EXTENDED_BOUNDS_FIELD = new ParseField(\"extended_bounds\");\n+ static final ParseField EXTENDED_BOUNDS_FIELD = Histogram.EXTENDED_BOUNDS_FIELD;\n static final ParseField MIN_FIELD = new ParseField(\"min\");\n static final ParseField MAX_FIELD = new ParseField(\"max\");\n ", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java", "status": "modified" }, { "diff": "@@ -18,6 +18,7 @@\n */\n package org.elasticsearch.search.aggregations.bucket.histogram;\n \n+import org.elasticsearch.common.ParseField;\n import org.elasticsearch.common.xcontent.ToXContent;\n import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;\n \n@@ -29,6 +30,13 @@\n */\n public interface Histogram extends MultiBucketsAggregation {\n \n+ ParseField INTERVAL_FIELD = new ParseField(\"interval\");\n+ ParseField OFFSET_FIELD = new ParseField(\"offset\");\n+ ParseField ORDER_FIELD = new ParseField(\"order\");\n+ ParseField KEYED_FIELD = new ParseField(\"keyed\");\n+ ParseField MIN_DOC_COUNT_FIELD = new ParseField(\"min_doc_count\");\n+ ParseField EXTENDED_BOUNDS_FIELD = new ParseField(\"extended_bounds\");\n+\n /**\n * A bucket in the histogram where documents fall in\n */\n@@ -40,46 +48,56 @@ interface Bucket extends MultiBucketsAggregation.Bucket {\n * @return The buckets of this histogram (each bucket representing an interval in the histogram)\n */\n @Override\n- List<? 
extends Bucket> getBuckets();\n+ List<Bucket> getBuckets();\n \n \n /**\n * A strategy defining the order in which the buckets in this histogram are ordered.\n */\n abstract class Order implements ToXContent {\n \n- public static final Order KEY_ASC = new InternalOrder((byte) 1, \"_key\", true, new Comparator<InternalHistogram.Bucket>() {\n+ private static int compareKey(Histogram.Bucket b1, Histogram.Bucket b2) {\n+ if (b1 instanceof InternalHistogram.Bucket) {\n+ return Double.compare(((InternalHistogram.Bucket) b1).key, ((InternalHistogram.Bucket) b2).key);\n+ } else if (b1 instanceof InternalDateHistogram.Bucket) {\n+ return Long.compare(((InternalDateHistogram.Bucket) b1).key, ((InternalDateHistogram.Bucket) b2).key);\n+ } else {\n+ throw new IllegalStateException(\"Unexpected impl: \" + b1.getClass());\n+ }\n+ }\n+\n+ public static final Order KEY_ASC = new InternalOrder((byte) 1, \"_key\", true, new Comparator<Histogram.Bucket>() {\n @Override\n- public int compare(InternalHistogram.Bucket b1, InternalHistogram.Bucket b2) {\n- return Long.compare(b1.key, b2.key);\n+ public int compare(Histogram.Bucket b1, Histogram.Bucket b2) {\n+ return compareKey(b1, b2);\n }\n });\n \n- public static final Order KEY_DESC = new InternalOrder((byte) 2, \"_key\", false, new Comparator<InternalHistogram.Bucket>() {\n+ public static final Order KEY_DESC = new InternalOrder((byte) 2, \"_key\", false, new Comparator<Histogram.Bucket>() {\n @Override\n- public int compare(InternalHistogram.Bucket b1, InternalHistogram.Bucket b2) {\n- return -Long.compare(b1.key, b2.key);\n+ public int compare(Histogram.Bucket b1, Histogram.Bucket b2) {\n+ return compareKey(b2, b1);\n }\n });\n \n- public static final Order COUNT_ASC = new InternalOrder((byte) 3, \"_count\", true, new Comparator<InternalHistogram.Bucket>() {\n+ public static final Order COUNT_ASC = new InternalOrder((byte) 3, \"_count\", true, new Comparator<Histogram.Bucket>() {\n @Override\n- public int compare(InternalHistogram.Bucket b1, InternalHistogram.Bucket b2) {\n+ public int compare(Histogram.Bucket b1, Histogram.Bucket b2) {\n int cmp = Long.compare(b1.getDocCount(), b2.getDocCount());\n if (cmp == 0) {\n- cmp = Long.compare(b1.key, b2.key);\n+ cmp = compareKey(b1, b2);\n }\n return cmp;\n }\n });\n \n \n- public static final Order COUNT_DESC = new InternalOrder((byte) 4, \"_count\", false, new Comparator<InternalHistogram.Bucket>() {\n+ public static final Order COUNT_DESC = new InternalOrder((byte) 4, \"_count\", false, new Comparator<Histogram.Bucket>() {\n @Override\n- public int compare(InternalHistogram.Bucket b1, InternalHistogram.Bucket b2) {\n- int cmp = -Long.compare(b1.getDocCount(), b2.getDocCount());\n+ public int compare(Histogram.Bucket b1, Histogram.Bucket b2) {\n+ int cmp = Long.compare(b2.getDocCount(), b1.getDocCount());\n if (cmp == 0) {\n- cmp = Long.compare(b1.key, b2.key);\n+ cmp = compareKey(b1, b2);\n }\n return cmp;\n }\n@@ -109,7 +127,7 @@ public static Order aggregation(String aggregationName, String valueName, boolea\n /**\n * @return The bucket comparator by which the order will be applied.\n */\n- abstract Comparator<InternalHistogram.Bucket> comparator();\n+ abstract Comparator<Histogram.Bucket> comparator();\n \n }\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/Histogram.java", "status": "modified" }, { "diff": "@@ -21,38 +21,224 @@\n \n import org.elasticsearch.common.ParseField;\n import org.elasticsearch.common.io.stream.StreamInput;\n+import 
org.elasticsearch.common.io.stream.StreamOutput;\n+import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;\n import org.elasticsearch.search.aggregations.AggregatorFactory;\n import org.elasticsearch.search.aggregations.support.AggregationContext;\n+import org.elasticsearch.search.aggregations.support.ValueType;\n+import org.elasticsearch.search.aggregations.support.ValuesSource;\n+import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;\n import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;\n+import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;\n import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;\n+import org.elasticsearch.search.aggregations.support.ValuesSourceType;\n \n import java.io.IOException;\n+import java.util.Objects;\n \n-public class HistogramAggregationBuilder extends AbstractHistogramBuilder<HistogramAggregationBuilder> {\n+/**\n+ * A builder for histograms on numeric fields.\n+ */\n+public class HistogramAggregationBuilder\n+ extends ValuesSourceAggregationBuilder<ValuesSource.Numeric, HistogramAggregationBuilder> {\n public static final String NAME = InternalHistogram.TYPE.name();\n public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME);\n \n+ private double interval;\n+ private double offset = 0;\n+ private double minBound = Double.MAX_VALUE;\n+ private double maxBound = Double.MIN_VALUE;\n+ private InternalOrder order = (InternalOrder) Histogram.Order.KEY_ASC;\n+ private boolean keyed = false;\n+ private long minDocCount = 0;\n+\n+ /** Create a new builder with the given name. */\n public HistogramAggregationBuilder(String name) {\n- super(name, InternalHistogram.HISTOGRAM_FACTORY);\n+ super(name, InternalHistogram.TYPE, ValuesSourceType.NUMERIC, ValueType.DOUBLE);\n }\n \n- /**\n- * Read from a stream.\n- */\n+ /** Read from a stream, for internal use only. */\n public HistogramAggregationBuilder(StreamInput in) throws IOException {\n- super(in, InternalHistogram.HISTOGRAM_FACTORY);\n+ super(in, InternalHistogram.TYPE, ValuesSourceType.NUMERIC, ValueType.DOUBLE);\n+ if (in.readBoolean()) {\n+ order = InternalOrder.Streams.readOrder(in);\n+ }\n+ keyed = in.readBoolean();\n+ minDocCount = in.readVLong();\n+ interval = in.readDouble();\n+ offset = in.readDouble();\n+ minBound = in.readDouble();\n+ maxBound = in.readDouble();\n }\n \n @Override\n- protected HistogramAggregatorFactory innerBuild(AggregationContext context, ValuesSourceConfig<Numeric> config,\n- AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException {\n- return new HistogramAggregatorFactory(name, type, config, interval, offset, order, keyed, minDocCount, extendedBounds, context,\n- parent, subFactoriesBuilder, metaData);\n+ protected void innerWriteTo(StreamOutput out) throws IOException {\n+ boolean hasOrder = order != null;\n+ out.writeBoolean(hasOrder);\n+ if (hasOrder) {\n+ InternalOrder.Streams.writeOrder(order, out);\n+ }\n+ out.writeBoolean(keyed);\n+ out.writeVLong(minDocCount);\n+ out.writeDouble(interval);\n+ out.writeDouble(offset);\n+ out.writeDouble(minBound);\n+ out.writeDouble(maxBound);\n+ }\n+\n+ /** Get the current interval that is set on this builder. */\n+ public double interval() {\n+ return interval;\n+ }\n+\n+ /** Set the interval on this builder, and return the builder so that calls can be chained. 
*/\n+ public HistogramAggregationBuilder interval(double interval) {\n+ if (interval <= 0) {\n+ throw new IllegalArgumentException(\"[interval] must be >0 for histogram aggregation [\" + name + \"]\");\n+ }\n+ this.interval = interval;\n+ return this;\n+ }\n+\n+ /** Get the current offset that is set on this builder. */\n+ public double offset() {\n+ return offset;\n+ }\n+\n+ /** Set the offset on this builder, and return the builder so that calls can be chained. */\n+ public HistogramAggregationBuilder offset(double offset) {\n+ this.offset = offset;\n+ return this;\n+ }\n+\n+ /** Get the current minimum bound that is set on this builder. */\n+ public double minBound() {\n+ return minBound;\n+ }\n+\n+ /** Get the current maximum bound that is set on this builder. */\n+ public double maxBound() {\n+ return maxBound;\n+ }\n+\n+ /** Set extended bounds on this builder: buckets between {@code minBound}\n+ * and {@code maxBound} will be created even if no documents fell into\n+ * these buckets. It is possible to create half-open bounds by providing\n+ * {@link Double#POSITIVE_INFINITY} as a {@code minBound} or \n+ * {@link Double#NEGATIVE_INFINITY} as a {@code maxBound}. */\n+ public HistogramAggregationBuilder extendedBounds(double minBound, double maxBound) {\n+ if (minBound == Double.NEGATIVE_INFINITY) {\n+ throw new IllegalArgumentException(\"minBound must not be -Infinity, got: \" + minBound);\n+ }\n+ if (maxBound == Double.POSITIVE_INFINITY) {\n+ throw new IllegalArgumentException(\"maxBound must not be +Infinity, got: \" + maxBound);\n+ }\n+ this.minBound = minBound;\n+ this.maxBound = maxBound;\n+ return this;\n+ }\n+\n+ /** Return the order to use to sort buckets of this histogram. */\n+ public Histogram.Order order() {\n+ return order;\n+ }\n+\n+ /** Set a new order on this builder and return the builder so that calls\n+ * can be chained. */\n+ public HistogramAggregationBuilder order(Histogram.Order order) {\n+ if (order == null) {\n+ throw new IllegalArgumentException(\"[order] must not be null: [\" + name + \"]\");\n+ }\n+ this.order = (InternalOrder) order;\n+ return this;\n+ }\n+\n+ /** Return whether buckets should be returned as a hash. In case\n+ * {@code keyed} is false, buckets will be returned as an array. */\n+ public boolean keyed() {\n+ return keyed;\n+ }\n+\n+ /** Set whether to return buckets as a hash or as an array, and return the\n+ * builder so that calls can be chained. */\n+ public HistogramAggregationBuilder keyed(boolean keyed) {\n+ this.keyed = keyed;\n+ return this;\n+ }\n+\n+ /** Return the minimum count of documents that buckets need to have in order\n+ * to be included in the response. */\n+ public long minDocCount() {\n+ return minDocCount;\n+ }\n+\n+ /** Set the minimum count of matching documents that buckets need to have\n+ * and return this builder so that calls can be chained. */\n+ public HistogramAggregationBuilder minDocCount(long minDocCount) {\n+ if (minDocCount < 0) {\n+ throw new IllegalArgumentException(\n+ \"[minDocCount] must be greater than or equal to 0. 
Found [\" + minDocCount + \"] in [\" + name + \"]\");\n+ }\n+ this.minDocCount = minDocCount;\n+ return this;\n+ }\n+\n+ @Override\n+ protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {\n+\n+ builder.field(Histogram.INTERVAL_FIELD.getPreferredName(), interval);\n+ builder.field(Histogram.OFFSET_FIELD.getPreferredName(), offset);\n+\n+ if (order != null) {\n+ builder.field(Histogram.ORDER_FIELD.getPreferredName());\n+ order.toXContent(builder, params);\n+ }\n+\n+ builder.field(Histogram.KEYED_FIELD.getPreferredName(), keyed);\n+\n+ builder.field(Histogram.MIN_DOC_COUNT_FIELD.getPreferredName(), minDocCount);\n+\n+ if (Double.isFinite(minBound) || Double.isFinite(maxBound)) {\n+ builder.startObject(Histogram.EXTENDED_BOUNDS_FIELD.getPreferredName());\n+ if (Double.isFinite(minBound)) {\n+ builder.field(\"min\", minBound);\n+ }\n+ if (Double.isFinite(maxBound)) {\n+ builder.field(\"max\", maxBound);\n+ }\n+ builder.endObject();\n+ }\n+\n+ return builder;\n }\n \n @Override\n public String getWriteableName() {\n- return NAME;\n+ return InternalHistogram.TYPE.name();\n+ }\n+\n+ @Override\n+ protected ValuesSourceAggregatorFactory<Numeric, ?> innerBuild(AggregationContext context, ValuesSourceConfig<Numeric> config,\n+ AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException {\n+ return new HistogramAggregatorFactory(name, type, config, interval, offset, order, keyed, minDocCount, minBound, maxBound,\n+ context, parent, subFactoriesBuilder, metaData);\n+ }\n+\n+ @Override\n+ protected int innerHashCode() {\n+ return Objects.hash(order, keyed, minDocCount, interval, offset, minBound, maxBound);\n+ }\n+\n+ @Override\n+ protected boolean innerEquals(Object obj) {\n+ HistogramAggregationBuilder other = (HistogramAggregationBuilder) obj;\n+ return Objects.equals(order, other.order)\n+ && Objects.equals(keyed, other.keyed)\n+ && Objects.equals(minDocCount, other.minDocCount)\n+ && Objects.equals(interval, other.interval)\n+ && Objects.equals(offset, other.offset)\n+ && Objects.equals(minBound, other.minBound)\n+ && Objects.equals(maxBound, other.maxBound);\n }\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -16,23 +16,23 @@\n * specific language governing permissions and limitations\n * under the License.\n */\n+\n package org.elasticsearch.search.aggregations.bucket.histogram;\n \n import org.apache.lucene.index.LeafReaderContext;\n-import org.apache.lucene.index.SortedNumericDocValues;\n import org.apache.lucene.util.CollectionUtil;\n-import org.elasticsearch.common.ParseField;\n import org.elasticsearch.common.inject.internal.Nullable;\n import org.elasticsearch.common.lease.Releasables;\n-import org.elasticsearch.common.rounding.Rounding;\n import org.elasticsearch.common.util.LongHash;\n+import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;\n import org.elasticsearch.search.DocValueFormat;\n import org.elasticsearch.search.aggregations.Aggregator;\n import org.elasticsearch.search.aggregations.AggregatorFactories;\n import org.elasticsearch.search.aggregations.InternalAggregation;\n import org.elasticsearch.search.aggregations.LeafBucketCollector;\n import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;\n import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;\n+import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.EmptyBucketInfo;\n import 
org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;\n import org.elasticsearch.search.aggregations.support.AggregationContext;\n import org.elasticsearch.search.aggregations.support.ValuesSource;\n@@ -43,38 +43,43 @@\n import java.util.List;\n import java.util.Map;\n \n-public class HistogramAggregator extends BucketsAggregator {\n-\n- public static final ParseField ORDER_FIELD = new ParseField(\"order\");\n- public static final ParseField KEYED_FIELD = new ParseField(\"keyed\");\n- public static final ParseField MIN_DOC_COUNT_FIELD = new ParseField(\"min_doc_count\");\n+/**\n+ * An aggregator for numeric values. For a given {@code interval},\n+ * {@code offset} and {@code value}, it returns the highest number that can be\n+ * written as {@code interval * x + offset} and yet is less than or equal to\n+ * {@code value}.\n+ */\n+class HistogramAggregator extends BucketsAggregator {\n \n private final ValuesSource.Numeric valuesSource;\n private final DocValueFormat formatter;\n- private final Rounding rounding;\n+ private final double interval, offset;\n private final InternalOrder order;\n private final boolean keyed;\n-\n private final long minDocCount;\n- private final ExtendedBounds extendedBounds;\n- private final InternalHistogram.Factory histogramFactory;\n+ private final double minBound, maxBound;\n \n private final LongHash bucketOrds;\n \n- public HistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, InternalOrder order, boolean keyed,\n- long minDocCount, @Nullable ExtendedBounds extendedBounds, @Nullable ValuesSource.Numeric valuesSource,\n- DocValueFormat formatter, InternalHistogram.Factory<?> histogramFactory, AggregationContext aggregationContext,\n- Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {\n+ public HistogramAggregator(String name, AggregatorFactories factories, double interval, double offset,\n+ InternalOrder order, boolean keyed, long minDocCount, double minBound, double maxBound,\n+ @Nullable ValuesSource.Numeric valuesSource, DocValueFormat formatter,\n+ AggregationContext aggregationContext, Aggregator parent,\n+ List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {\n \n super(name, factories, aggregationContext, parent, pipelineAggregators, metaData);\n- this.rounding = rounding;\n+ if (interval <= 0) {\n+ throw new IllegalArgumentException(\"interval must be positive, got: \" + interval);\n+ }\n+ this.interval = interval;\n+ this.offset = offset;\n this.order = order;\n this.keyed = keyed;\n this.minDocCount = minDocCount;\n- this.extendedBounds = extendedBounds;\n+ this.minBound = minBound;\n+ this.maxBound = maxBound;\n this.valuesSource = valuesSource;\n this.formatter = formatter;\n- this.histogramFactory = histogramFactory;\n \n bucketOrds = new LongHash(1, aggregationContext.bigArrays());\n }\n@@ -90,23 +95,24 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,\n if (valuesSource == null) {\n return LeafBucketCollector.NO_OP_COLLECTOR;\n }\n- final SortedNumericDocValues values = valuesSource.longValues(ctx);\n+\n+ final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx);\n return new LeafBucketCollectorBase(sub, values) {\n @Override\n public void collect(int doc, long bucket) throws IOException {\n assert bucket == 0;\n values.setDocument(doc);\n final int valuesCount = values.count();\n \n- long previousKey = Long.MIN_VALUE;\n+ double previousKey = 
Double.NEGATIVE_INFINITY;\n for (int i = 0; i < valuesCount; ++i) {\n- long value = values.valueAt(i);\n- long key = rounding.roundKey(value);\n+ double value = values.valueAt(i);\n+ double key = Math.floor((value - offset) / interval);\n assert key >= previousKey;\n if (key == previousKey) {\n continue;\n }\n- long bucketOrd = bucketOrds.add(key);\n+ long bucketOrd = bucketOrds.add(Double.doubleToLongBits(key));\n if (bucketOrd < 0) { // already seen\n bucketOrd = -1 - bucketOrd;\n collectExistingBucket(sub, doc, bucketOrd);\n@@ -120,26 +126,32 @@ public void collect(int doc, long bucket) throws IOException {\n }\n \n @Override\n- public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {\n- assert owningBucketOrdinal == 0;\n+ public InternalAggregation buildAggregation(long bucket) throws IOException {\n+ assert bucket == 0;\n List<InternalHistogram.Bucket> buckets = new ArrayList<>((int) bucketOrds.size());\n for (long i = 0; i < bucketOrds.size(); i++) {\n- buckets.add(histogramFactory.createBucket(rounding.valueForKey(bucketOrds.get(i)), bucketDocCount(i), bucketAggregations(i), keyed, formatter));\n+ double roundKey = Double.longBitsToDouble(bucketOrds.get(i));\n+ double key = roundKey * interval + offset;\n+ buckets.add(new InternalHistogram.Bucket(key, bucketDocCount(i), keyed, formatter, bucketAggregations(i)));\n }\n \n // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order\n CollectionUtil.introSort(buckets, InternalOrder.KEY_ASC.comparator());\n \n- // value source will be null for unmapped fields\n- InternalHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 ? new InternalHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds) : null;\n- return histogramFactory.create(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, pipelineAggregators(), metaData());\n+ EmptyBucketInfo emptyBucketInfo = null;\n+ if (minDocCount == 0) {\n+ emptyBucketInfo = new EmptyBucketInfo(interval, offset, minBound, maxBound, buildEmptySubAggregations());\n+ }\n+ return new InternalHistogram(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, pipelineAggregators(), metaData());\n }\n \n @Override\n public InternalAggregation buildEmptyAggregation() {\n- InternalHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 ? 
new InternalHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds) : null;\n- return histogramFactory.create(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed, pipelineAggregators(),\n- metaData());\n+ EmptyBucketInfo emptyBucketInfo = null;\n+ if (minDocCount == 0) {\n+ emptyBucketInfo = new EmptyBucketInfo(interval, offset, minBound, maxBound, buildEmptySubAggregations());\n+ }\n+ return new InternalHistogram(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed, pipelineAggregators(), metaData());\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java", "status": "modified" }, { "diff": "@@ -19,24 +19,66 @@\n \n package org.elasticsearch.search.aggregations.bucket.histogram;\n \n+import org.elasticsearch.search.aggregations.Aggregator;\n import org.elasticsearch.search.aggregations.AggregatorFactories;\n import org.elasticsearch.search.aggregations.AggregatorFactory;\n import org.elasticsearch.search.aggregations.InternalAggregation.Type;\n+import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;\n+import org.elasticsearch.search.aggregations.support.AggregationContext;\n+import org.elasticsearch.search.aggregations.support.ValuesSource;\n+import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;\n+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;\n import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;\n \n import java.io.IOException;\n+import java.util.List;\n import java.util.Map;\n \n-import org.elasticsearch.search.aggregations.support.AggregationContext;\n-import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;\n+public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFactory<ValuesSource.Numeric, HistogramAggregatorFactory> {\n+\n+ private final double interval, offset;\n+ private final InternalOrder order;\n+ private final boolean keyed;\n+ private final long minDocCount;\n+ private final double minBound, maxBound;\n+\n+ HistogramAggregatorFactory(String name, Type type, ValuesSourceConfig<Numeric> config, double interval, double offset,\n+ InternalOrder order, boolean keyed, long minDocCount, double minBound, double maxBound,\n+ AggregationContext context, AggregatorFactory<?> parent,\n+ AggregatorFactories.Builder subFactoriesBuilder, Map<String, Object> metaData) throws IOException {\n+ super(name, type, config, context, parent, subFactoriesBuilder, metaData);\n+ this.interval = interval;\n+ this.offset = offset;\n+ this.order = order;\n+ this.keyed = keyed;\n+ this.minDocCount = minDocCount;\n+ this.minBound = minBound;\n+ this.maxBound = maxBound;\n+ }\n+\n+ public long minDocCount() {\n+ return minDocCount;\n+ }\n \n-public class HistogramAggregatorFactory extends AbstractHistogramAggregatorFactory<HistogramAggregatorFactory> {\n+ @Override\n+ protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket,\n+ List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {\n+ if (collectsFromSingleBucket == false) {\n+ return asMultiBucketAggregator(this, context, parent);\n+ }\n+ return createAggregator(valuesSource, parent, pipelineAggregators, metaData);\n+ }\n+\n+ private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent, List<PipelineAggregator> 
pipelineAggregators,\n+ Map<String, Object> metaData) throws IOException {\n \n- public HistogramAggregatorFactory(String name, Type type, ValuesSourceConfig<Numeric> config, long interval, long offset,\n- InternalOrder order, boolean keyed, long minDocCount, ExtendedBounds extendedBounds, AggregationContext context,\n- AggregatorFactory<?> parent, AggregatorFactories.Builder subFactoriesBuilder, Map<String, Object> metaData) throws IOException {\n- super(name, type, config, interval, offset, order, keyed, minDocCount, extendedBounds, InternalHistogram.HISTOGRAM_FACTORY, context,\n- parent, subFactoriesBuilder, metaData);\n+ return new HistogramAggregator(name, factories, interval, offset, order, keyed, minDocCount, minBound, maxBound, valuesSource,\n+ config.format(), context, parent, pipelineAggregators, metaData);\n }\n \n-}\n+ @Override\n+ protected Aggregator createUnmapped(Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)\n+ throws IOException {\n+ return createAggregator(null, parent, pipelineAggregators, metaData);\n+ }\n+}\n\\ No newline at end of file", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java", "status": "modified" }, { "diff": "@@ -0,0 +1,48 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.search.aggregations.bucket.histogram;\n+\n+import org.elasticsearch.search.aggregations.InternalAggregation;\n+import org.elasticsearch.search.aggregations.InternalAggregations;\n+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;\n+\n+import java.util.List;\n+\n+/** Implemented by histogram aggregations and used by pipeline aggregations to insert buckets. */\n+// public so that pipeline aggs can use this API: can we fix it?\n+public interface HistogramFactory {\n+\n+ /** Get the key for the given bucket. Date histograms must return the\n+ * number of millis since Epoch of the bucket key while numeric histograms\n+ * must return the double value of the key. */\n+ Number getKey(MultiBucketsAggregation.Bucket bucket);\n+\n+ /** Given a key returned by {@link #getKey}, compute the lowest key that is \n+ * greater than it. */\n+ Number nextKey(Number key);\n+\n+ /** Create an {@link InternalAggregation} object that wraps the given buckets. */\n+ InternalAggregation createAggregation(List<MultiBucketsAggregation.Bucket> buckets);\n+\n+ /** Create a {@link MultiBucketsAggregation.Bucket} object that wraps the\n+ * given key, document count and aggregations. 
*/\n+ MultiBucketsAggregation.Bucket createBucket(Number key, long docCount, InternalAggregations aggregations);\n+\n+}", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramFactory.java", "status": "added" }, { "diff": "@@ -20,8 +20,9 @@\n \n import org.elasticsearch.common.ParseField;\n import org.elasticsearch.common.ParseFieldMatcher;\n+import org.elasticsearch.common.ParseFieldMatcherSupplier;\n import org.elasticsearch.common.ParsingException;\n-import org.elasticsearch.common.rounding.Rounding;\n+import org.elasticsearch.common.xcontent.ObjectParser;\n import org.elasticsearch.common.xcontent.XContentParser;\n import org.elasticsearch.common.xcontent.XContentParser.Token;\n import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;\n@@ -32,46 +33,51 @@\n import java.util.Map;\n \n /**\n- * Parses the histogram request\n+ * A parser for date histograms. This translates json into an\n+ * {@link HistogramAggregationBuilder} instance.\n */\n public class HistogramParser extends NumericValuesSourceParser {\n \n- public HistogramParser() {\n- super(true, true, false);\n+ private static final ObjectParser<double[], ParseFieldMatcherSupplier> EXTENDED_BOUNDS_PARSER = new ObjectParser<>(\n+ Histogram.EXTENDED_BOUNDS_FIELD.getPreferredName(),\n+ () -> new double[]{ Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY });\n+ static {\n+ EXTENDED_BOUNDS_PARSER.declareDouble((bounds, d) -> bounds[0] = d, new ParseField(\"min\"));\n+ EXTENDED_BOUNDS_PARSER.declareDouble((bounds, d) -> bounds[1] = d, new ParseField(\"max\"));\n }\n \n- protected HistogramParser(boolean timezoneAware) {\n- super(true, true, timezoneAware);\n+ public HistogramParser() {\n+ super(true, true, false);\n }\n \n @Override\n- protected AbstractHistogramBuilder<?> createFactory(String aggregationName, ValuesSourceType valuesSourceType,\n+ protected HistogramAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType,\n ValueType targetValueType, Map<ParseField, Object> otherOptions) {\n HistogramAggregationBuilder factory = new HistogramAggregationBuilder(aggregationName);\n- Long interval = (Long) otherOptions.get(Rounding.Interval.INTERVAL_FIELD);\n+ Double interval = (Double) otherOptions.get(Histogram.INTERVAL_FIELD);\n if (interval == null) {\n throw new ParsingException(null, \"Missing required field [interval] for histogram aggregation [\" + aggregationName + \"]\");\n } else {\n factory.interval(interval);\n }\n- Long offset = (Long) otherOptions.get(Rounding.OffsetRounding.OFFSET_FIELD);\n+ Double offset = (Double) otherOptions.get(Histogram.OFFSET_FIELD);\n if (offset != null) {\n factory.offset(offset);\n }\n \n- ExtendedBounds extendedBounds = (ExtendedBounds) otherOptions.get(ExtendedBounds.EXTENDED_BOUNDS_FIELD);\n+ double[] extendedBounds = (double[]) otherOptions.get(Histogram.EXTENDED_BOUNDS_FIELD);\n if (extendedBounds != null) {\n- factory.extendedBounds(extendedBounds);\n+ factory.extendedBounds(extendedBounds[0], extendedBounds[1]);\n }\n- Boolean keyed = (Boolean) otherOptions.get(HistogramAggregator.KEYED_FIELD);\n+ Boolean keyed = (Boolean) otherOptions.get(Histogram.KEYED_FIELD);\n if (keyed != null) {\n factory.keyed(keyed);\n }\n- Long minDocCount = (Long) otherOptions.get(HistogramAggregator.MIN_DOC_COUNT_FIELD);\n+ Long minDocCount = (Long) otherOptions.get(Histogram.MIN_DOC_COUNT_FIELD);\n if (minDocCount != null) {\n factory.minDocCount(minDocCount);\n }\n- InternalOrder 
order = (InternalOrder) otherOptions.get(HistogramAggregator.ORDER_FIELD);\n+ InternalOrder order = (InternalOrder) otherOptions.get(Histogram.ORDER_FIELD);\n if (order != null) {\n factory.order(order);\n }\n@@ -82,33 +88,23 @@ protected AbstractHistogramBuilder<?> createFactory(String aggregationName, Valu\n protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,\n ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {\n if (token.isValue()) {\n- if (parseFieldMatcher.match(currentFieldName, Rounding.Interval.INTERVAL_FIELD)) {\n- if (token == XContentParser.Token.VALUE_STRING) {\n- otherOptions.put(Rounding.Interval.INTERVAL_FIELD, parseStringInterval(parser.text()));\n- return true;\n- } else {\n- otherOptions.put(Rounding.Interval.INTERVAL_FIELD, parser.longValue());\n- return true;\n- }\n- } else if (parseFieldMatcher.match(currentFieldName, HistogramAggregator.MIN_DOC_COUNT_FIELD)) {\n- otherOptions.put(HistogramAggregator.MIN_DOC_COUNT_FIELD, parser.longValue());\n+ if (parseFieldMatcher.match(currentFieldName, Histogram.INTERVAL_FIELD)) {\n+ otherOptions.put(Histogram.INTERVAL_FIELD, parser.doubleValue());\n return true;\n- } else if (parseFieldMatcher.match(currentFieldName, HistogramAggregator.KEYED_FIELD)) {\n- otherOptions.put(HistogramAggregator.KEYED_FIELD, parser.booleanValue());\n+ } else if (parseFieldMatcher.match(currentFieldName, Histogram.MIN_DOC_COUNT_FIELD)) {\n+ otherOptions.put(Histogram.MIN_DOC_COUNT_FIELD, parser.longValue());\n+ return true;\n+ } else if (parseFieldMatcher.match(currentFieldName, Histogram.KEYED_FIELD)) {\n+ otherOptions.put(Histogram.KEYED_FIELD, parser.booleanValue());\n+ return true;\n+ } else if (parseFieldMatcher.match(currentFieldName, Histogram.OFFSET_FIELD)) {\n+ otherOptions.put(Histogram.OFFSET_FIELD, parser.doubleValue());\n return true;\n- } else if (parseFieldMatcher.match(currentFieldName, Rounding.OffsetRounding.OFFSET_FIELD)) {\n- if (token == XContentParser.Token.VALUE_STRING) {\n- otherOptions.put(Rounding.OffsetRounding.OFFSET_FIELD, parseStringOffset(parser.text()));\n- return true;\n- } else {\n- otherOptions.put(Rounding.OffsetRounding.OFFSET_FIELD, parser.longValue());\n- return true;\n- }\n } else {\n return false;\n }\n } else if (token == XContentParser.Token.START_OBJECT) {\n- if (parseFieldMatcher.match(currentFieldName, HistogramAggregator.ORDER_FIELD)) {\n+ if (parseFieldMatcher.match(currentFieldName, Histogram.ORDER_FIELD)) {\n InternalOrder order = null;\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {\n@@ -124,14 +120,11 @@ protected boolean token(String aggregationName, String currentFieldName, Token t\n order = resolveOrder(currentFieldName, asc);\n }\n }\n- otherOptions.put(HistogramAggregator.ORDER_FIELD, order);\n+ otherOptions.put(Histogram.ORDER_FIELD, order);\n return true;\n- } else if (parseFieldMatcher.match(currentFieldName, ExtendedBounds.EXTENDED_BOUNDS_FIELD)) {\n- try {\n- otherOptions.put(ExtendedBounds.EXTENDED_BOUNDS_FIELD, ExtendedBounds.PARSER.apply(parser, () -> parseFieldMatcher));\n- } catch (Exception e) {\n- throw new ParsingException(parser.getTokenLocation(), \"Error parsing [{}]\", e, aggregationName);\n- }\n+ } else if (parseFieldMatcher.match(currentFieldName, Histogram.EXTENDED_BOUNDS_FIELD)) {\n+ double[] bounds = EXTENDED_BOUNDS_PARSER.apply(parser, () -> parseFieldMatcher);\n+ 
otherOptions.put(Histogram.EXTENDED_BOUNDS_FIELD, bounds);\n return true;\n } else {\n return false;\n@@ -141,14 +134,6 @@ protected boolean token(String aggregationName, String currentFieldName, Token t\n }\n }\n \n- protected Object parseStringInterval(String interval) {\n- return Long.valueOf(interval);\n- }\n-\n- protected long parseStringOffset(String offset) throws IOException {\n- return Long.valueOf(offset);\n- }\n-\n static InternalOrder resolveOrder(String key, boolean asc) {\n if (\"_key\".equals(key)) {\n return (InternalOrder) (asc ? InternalOrder.KEY_ASC : InternalOrder.KEY_DESC);", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java", "status": "modified" }, { "diff": "@@ -18,36 +18,71 @@\n */\n package org.elasticsearch.search.aggregations.bucket.histogram;\n \n+import org.apache.lucene.util.CollectionUtil;\n+import org.apache.lucene.util.PriorityQueue;\n import org.elasticsearch.common.io.stream.StreamInput;\n+import org.elasticsearch.common.io.stream.StreamOutput;\n+import org.elasticsearch.common.rounding.Rounding;\n+import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.search.DocValueFormat;\n-import org.elasticsearch.search.aggregations.AggregationExecutionException;\n-import org.elasticsearch.search.aggregations.InternalAggregation.Type;\n+import org.elasticsearch.search.aggregations.Aggregations;\n+import org.elasticsearch.search.aggregations.InternalAggregation;\n import org.elasticsearch.search.aggregations.InternalAggregations;\n-import org.elasticsearch.search.aggregations.support.ValueType;\n+import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;\n+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;\n+import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;\n import org.joda.time.DateTime;\n import org.joda.time.DateTimeZone;\n \n import java.io.IOException;\n+import java.util.ArrayList;\n+import java.util.Collections;\n+import java.util.Iterator;\n+import java.util.List;\n+import java.util.ListIterator;\n+import java.util.Map;\n \n /**\n- * Results of a date_historgram aggregation.\n+ * Imelementation of {@link Histogram}.\n */\n-public class InternalDateHistogram {\n+public final class InternalDateHistogram extends InternalMultiBucketAggregation<InternalDateHistogram, InternalDateHistogram.Bucket>\n+ implements Histogram, HistogramFactory {\n \n- public static final Factory HISTOGRAM_FACTORY = new Factory();\n static final Type TYPE = new Type(\"date_histogram\");\n \n- static class Bucket extends InternalHistogram.Bucket {\n- Bucket(long key, long docCount, InternalAggregations aggregations, boolean keyed, DocValueFormat formatter,\n- InternalHistogram.Factory<Bucket> factory) {\n- super(key, docCount, keyed, formatter, factory, aggregations);\n+ public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket {\n+\n+ final long key;\n+ final long docCount;\n+ final InternalAggregations aggregations;\n+ private final transient boolean keyed;\n+ protected final transient DocValueFormat format;\n+\n+ public Bucket(long key, long docCount, boolean keyed, DocValueFormat format,\n+ InternalAggregations aggregations) {\n+ this.format = format;\n+ this.keyed = keyed;\n+ this.key = key;\n+ this.docCount = docCount;\n+ this.aggregations = aggregations;\n }\n \n /**\n * Read from a stream.\n */\n- Bucket(StreamInput in, boolean keyed, DocValueFormat formatter, 
InternalHistogram.Factory<Bucket> factory) throws IOException {\n- super(in, keyed, formatter, factory);\n+ public Bucket(StreamInput in, boolean keyed, DocValueFormat format) throws IOException {\n+ this.format = format;\n+ this.keyed = keyed;\n+ key = in.readLong();\n+ docCount = in.readVLong();\n+ aggregations = InternalAggregations.readAggregations(in);\n+ }\n+\n+ @Override\n+ public void writeTo(StreamOutput out) throws IOException {\n+ out.writeLong(key);\n+ out.writeVLong(docCount);\n+ aggregations.writeTo(out);\n }\n \n @Override\n@@ -56,53 +91,356 @@ public String getKeyAsString() {\n }\n \n @Override\n- public DateTime getKey() {\n+ public Object getKey() {\n return new DateTime(key, DateTimeZone.UTC);\n }\n \n @Override\n- public String toString() {\n- return getKeyAsString();\n+ public long getDocCount() {\n+ return docCount;\n }\n- }\n \n- static class Factory extends InternalHistogram.Factory<InternalDateHistogram.Bucket> {\n+ @Override\n+ public Aggregations getAggregations() {\n+ return aggregations;\n+ }\n \n- Factory() {\n+ Bucket reduce(List<Bucket> buckets, ReduceContext context) {\n+ List<InternalAggregations> aggregations = new ArrayList<>(buckets.size());\n+ long docCount = 0;\n+ for (Bucket bucket : buckets) {\n+ docCount += bucket.docCount;\n+ aggregations.add((InternalAggregations) bucket.getAggregations());\n+ }\n+ InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);\n+ return new InternalDateHistogram.Bucket(key, docCount, keyed, format, aggs);\n }\n \n @Override\n- public Type type() {\n- return TYPE;\n+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {\n+ String keyAsString = format.format(key);\n+ if (keyed) {\n+ builder.startObject(keyAsString);\n+ } else {\n+ builder.startObject();\n+ }\n+ if (format != DocValueFormat.RAW) {\n+ builder.field(CommonFields.KEY_AS_STRING, keyAsString);\n+ }\n+ builder.field(CommonFields.KEY, key);\n+ builder.field(CommonFields.DOC_COUNT, docCount);\n+ aggregations.toXContentInternal(builder, params);\n+ builder.endObject();\n+ return builder;\n }\n \n- @Override\n- public ValueType valueType() {\n- return ValueType.DATE;\n+ public DocValueFormat getFormatter() {\n+ return format;\n }\n \n- @Override\n- public InternalDateHistogram.Bucket createBucket(InternalAggregations aggregations, InternalDateHistogram.Bucket prototype) {\n- return new Bucket(prototype.key, prototype.docCount, aggregations, prototype.getKeyed(), prototype.format, this);\n+ public boolean getKeyed() {\n+ return keyed;\n }\n+ }\n \n- @Override\n- public InternalDateHistogram.Bucket createBucket(Object key, long docCount, InternalAggregations aggregations, boolean keyed,\n- DocValueFormat formatter) {\n- if (key instanceof Number) {\n- return new Bucket(((Number) key).longValue(), docCount, aggregations, keyed, formatter, this);\n- } else if (key instanceof DateTime) {\n- return new Bucket(((DateTime) key).getMillis(), docCount, aggregations, keyed, formatter, this);\n+ static class EmptyBucketInfo {\n+\n+ final Rounding rounding;\n+ final InternalAggregations subAggregations;\n+ final ExtendedBounds bounds;\n+\n+ EmptyBucketInfo(Rounding rounding, InternalAggregations subAggregations) {\n+ this(rounding, subAggregations, null);\n+ }\n+\n+ EmptyBucketInfo(Rounding rounding, InternalAggregations subAggregations, ExtendedBounds bounds) {\n+ this.rounding = rounding;\n+ this.subAggregations = subAggregations;\n+ this.bounds = bounds;\n+ }\n+\n+ EmptyBucketInfo(StreamInput in) throws 
IOException {\n+ rounding = Rounding.Streams.read(in);\n+ subAggregations = InternalAggregations.readAggregations(in);\n+ bounds = in.readOptionalWriteable(ExtendedBounds::new);\n+ }\n+\n+ void writeTo(StreamOutput out) throws IOException {\n+ Rounding.Streams.write(rounding, out);\n+ subAggregations.writeTo(out);\n+ out.writeOptionalWriteable(bounds);\n+ }\n+\n+ }\n+\n+ private final List<Bucket> buckets;\n+ private final InternalOrder order;\n+ private final DocValueFormat format;\n+ private final boolean keyed;\n+ private final long minDocCount;\n+ private final EmptyBucketInfo emptyBucketInfo;\n+\n+ InternalDateHistogram(String name, List<Bucket> buckets, InternalOrder order, long minDocCount, EmptyBucketInfo emptyBucketInfo,\n+ DocValueFormat formatter, boolean keyed, List<PipelineAggregator> pipelineAggregators,\n+ Map<String, Object> metaData) {\n+ super(name, pipelineAggregators, metaData);\n+ this.buckets = buckets;\n+ this.order = order;\n+ assert (minDocCount == 0) == (emptyBucketInfo != null);\n+ this.minDocCount = minDocCount;\n+ this.emptyBucketInfo = emptyBucketInfo;\n+ this.format = formatter;\n+ this.keyed = keyed;\n+ }\n+\n+ /**\n+ * Stream from a stream.\n+ */\n+ public InternalDateHistogram(StreamInput in) throws IOException {\n+ super(in);\n+ order = InternalOrder.Streams.readOrder(in);\n+ minDocCount = in.readVLong();\n+ if (minDocCount == 0) {\n+ emptyBucketInfo = new EmptyBucketInfo(in);\n+ } else {\n+ emptyBucketInfo = null;\n+ }\n+ format = in.readNamedWriteable(DocValueFormat.class);\n+ keyed = in.readBoolean();\n+ buckets = in.readList(stream -> new Bucket(stream, keyed, format));\n+ }\n+\n+ @Override\n+ protected void doWriteTo(StreamOutput out) throws IOException {\n+ InternalOrder.Streams.writeOrder(order, out);\n+ out.writeVLong(minDocCount);\n+ if (minDocCount == 0) {\n+ emptyBucketInfo.writeTo(out);\n+ }\n+ out.writeNamedWriteable(format);\n+ out.writeBoolean(keyed);\n+ out.writeList(buckets);\n+ }\n+\n+ @Override\n+ public String getWriteableName() {\n+ return DateHistogramAggregationBuilder.NAME;\n+ }\n+\n+ @Override\n+ public List<Histogram.Bucket> getBuckets() {\n+ return Collections.unmodifiableList(buckets);\n+ }\n+\n+ @Override\n+ public InternalDateHistogram create(List<Bucket> buckets) {\n+ return new InternalDateHistogram(name, buckets, order, minDocCount, emptyBucketInfo, format,\n+ keyed, pipelineAggregators(), metaData);\n+ }\n+\n+ @Override\n+ public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {\n+ return new Bucket(prototype.key, prototype.docCount, prototype.keyed, prototype.format, aggregations);\n+ }\n+\n+ private static class IteratorAndCurrent {\n+\n+ private final Iterator<Bucket> iterator;\n+ private Bucket current;\n+\n+ IteratorAndCurrent(Iterator<Bucket> iterator) {\n+ this.iterator = iterator;\n+ current = iterator.next();\n+ }\n+\n+ }\n+\n+ private List<Bucket> reduceBuckets(List<InternalAggregation> aggregations, ReduceContext reduceContext) {\n+\n+ final PriorityQueue<IteratorAndCurrent> pq = new PriorityQueue<IteratorAndCurrent>(aggregations.size()) {\n+ @Override\n+ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) {\n+ return a.current.key < b.current.key;\n+ }\n+ };\n+ for (InternalAggregation aggregation : aggregations) {\n+ InternalDateHistogram histogram = (InternalDateHistogram) aggregation;\n+ if (histogram.buckets.isEmpty() == false) {\n+ pq.add(new IteratorAndCurrent(histogram.buckets.iterator()));\n+ }\n+ }\n+\n+ List<Bucket> reducedBuckets = new ArrayList<>();\n+ if 
(pq.size() > 0) {\n+ // list of buckets coming from different shards that have the same key\n+ List<Bucket> currentBuckets = new ArrayList<>();\n+ double key = pq.top().current.key;\n+\n+ do {\n+ final IteratorAndCurrent top = pq.top();\n+\n+ if (top.current.key != key) {\n+ // the key changes, reduce what we already buffered and reset the buffer for current buckets\n+ final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);\n+ if (reduced.getDocCount() >= minDocCount) {\n+ reducedBuckets.add(reduced);\n+ }\n+ currentBuckets.clear();\n+ key = top.current.key;\n+ }\n+\n+ currentBuckets.add(top.current);\n+\n+ if (top.iterator.hasNext()) {\n+ final Bucket next = top.iterator.next();\n+ assert next.key > top.current.key : \"shards must return data sorted by key\";\n+ top.current = next;\n+ pq.updateTop();\n+ } else {\n+ pq.pop();\n+ }\n+ } while (pq.size() > 0);\n+\n+ if (currentBuckets.isEmpty() == false) {\n+ final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);\n+ if (reduced.getDocCount() >= minDocCount) {\n+ reducedBuckets.add(reduced);\n+ }\n+ }\n+ }\n+\n+ return reducedBuckets;\n+ }\n+\n+ private void addEmptyBuckets(List<Bucket> list, ReduceContext reduceContext) {\n+ Bucket lastBucket = null;\n+ ExtendedBounds bounds = emptyBucketInfo.bounds;\n+ ListIterator<Bucket> iter = list.listIterator();\n+\n+ // first adding all the empty buckets *before* the actual data (based on th extended_bounds.min the user requested)\n+ InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce(Collections.singletonList(emptyBucketInfo.subAggregations),\n+ reduceContext);\n+ if (bounds != null) {\n+ Bucket firstBucket = iter.hasNext() ? list.get(iter.nextIndex()) : null;\n+ if (firstBucket == null) {\n+ if (bounds.getMin() != null && bounds.getMax() != null) {\n+ long key = bounds.getMin();\n+ long max = bounds.getMax();\n+ while (key <= max) {\n+ iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));\n+ key = emptyBucketInfo.rounding.nextRoundingValue(key);\n+ }\n+ }\n } else {\n- throw new AggregationExecutionException(\"Expected key of type Number or DateTime but got [\" + key + \"]\");\n+ if (bounds.getMin() != null) {\n+ long key = bounds.getMin();\n+ if (key < firstBucket.key) {\n+ while (key < firstBucket.key) {\n+ iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));\n+ key = emptyBucketInfo.rounding.nextRoundingValue(key);\n+ }\n+ }\n+ }\n }\n }\n \n- @Override\n- protected Bucket readBucket(StreamInput in, boolean keyed, DocValueFormat format) throws IOException {\n- return new Bucket(in, keyed, format, this);\n+ // now adding the empty buckets within the actual data,\n+ // e.g. 
if the data series is [1,2,3,7] there're 3 empty buckets that will be created for 4,5,6\n+ while (iter.hasNext()) {\n+ Bucket nextBucket = list.get(iter.nextIndex());\n+ if (lastBucket != null) {\n+ long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key);\n+ while (key < nextBucket.key) {\n+ iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));\n+ key = emptyBucketInfo.rounding.nextRoundingValue(key);\n+ }\n+ assert key == nextBucket.key;\n+ }\n+ lastBucket = iter.next();\n+ }\n+\n+ // finally, adding the empty buckets *after* the actual data (based on the extended_bounds.max requested by the user)\n+ if (bounds != null && lastBucket != null && bounds.getMax() != null && bounds.getMax() > lastBucket.key) {\n+ long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key);\n+ long max = bounds.getMax();\n+ while (key <= max) {\n+ iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));\n+ key = emptyBucketInfo.rounding.nextRoundingValue(key);\n+ }\n+ }\n+ }\n+\n+ @Override\n+ public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {\n+ List<Bucket> reducedBuckets = reduceBuckets(aggregations, reduceContext);\n+\n+ // adding empty buckets if needed\n+ if (minDocCount == 0) {\n+ addEmptyBuckets(reducedBuckets, reduceContext);\n }\n+\n+ if (order == InternalOrder.KEY_ASC) {\n+ // nothing to do, data are already sorted since shards return\n+ // sorted buckets and the merge-sort performed by reduceBuckets\n+ // maintains order\n+ } else if (order == InternalOrder.KEY_DESC) {\n+ // we just need to reverse here...\n+ List<Bucket> reverse = new ArrayList<>(reducedBuckets);\n+ Collections.reverse(reverse);\n+ reducedBuckets = reverse;\n+ } else {\n+ // sorted by sub-aggregation, need to fall back to a costly n*log(n) sort\n+ CollectionUtil.introSort(reducedBuckets, order.comparator());\n+ }\n+\n+ return new InternalDateHistogram(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo,\n+ format, keyed, pipelineAggregators(), getMetaData());\n }\n \n- private InternalDateHistogram() {}\n+ @Override\n+ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {\n+ if (keyed) {\n+ builder.startObject(CommonFields.BUCKETS);\n+ } else {\n+ builder.startArray(CommonFields.BUCKETS);\n+ }\n+ for (Bucket bucket : buckets) {\n+ bucket.toXContent(builder, params);\n+ }\n+ if (keyed) {\n+ builder.endObject();\n+ } else {\n+ builder.endArray();\n+ }\n+ return builder;\n+ }\n+\n+ // HistogramFactory method impls\n+\n+ @Override\n+ public Number getKey(MultiBucketsAggregation.Bucket bucket) {\n+ return ((Bucket) bucket).key;\n+ }\n+\n+ @Override\n+ public Number nextKey(Number key) {\n+ return emptyBucketInfo.rounding.nextRoundingValue(key.longValue());\n+ }\n+\n+ @Override\n+ public InternalAggregation createAggregation(List<MultiBucketsAggregation.Bucket> buckets) {\n+ // convert buckets to the right type\n+ List<Bucket> buckets2 = new ArrayList<>(buckets.size());\n+ for (Object b : buckets) {\n+ buckets2.add((Bucket) b);\n+ }\n+ buckets2 = Collections.unmodifiableList(buckets2);\n+ return new InternalDateHistogram(name, buckets2, order, minDocCount, emptyBucketInfo, format,\n+ keyed, pipelineAggregators(), getMetaData());\n+ }\n+\n+ @Override\n+ public Bucket createBucket(Number key, long docCount, InternalAggregations aggregations) {\n+ return new Bucket(key.longValue(), docCount, keyed, format, aggregations);\n+ }\n }", "filename": 
"core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java", "status": "modified" }, { "diff": "@@ -22,16 +22,14 @@\n import org.apache.lucene.util.PriorityQueue;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n-import org.elasticsearch.common.rounding.Rounding;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.search.DocValueFormat;\n-import org.elasticsearch.search.aggregations.AggregationExecutionException;\n import org.elasticsearch.search.aggregations.Aggregations;\n import org.elasticsearch.search.aggregations.InternalAggregation;\n import org.elasticsearch.search.aggregations.InternalAggregations;\n import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;\n+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;\n import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;\n-import org.elasticsearch.search.aggregations.support.ValueType;\n \n import java.io.IOException;\n import java.util.ArrayList;\n@@ -42,28 +40,25 @@\n import java.util.Map;\n \n /**\n- * TODO should be renamed to InternalNumericHistogram (see comment on {@link Histogram})?\n+ * Imelementation of {@link Histogram}.\n */\n-public class InternalHistogram<B extends InternalHistogram.Bucket> extends InternalMultiBucketAggregation<InternalHistogram<B>, B>\n- implements Histogram {\n+public final class InternalHistogram extends InternalMultiBucketAggregation<InternalHistogram, InternalHistogram.Bucket>\n+ implements Histogram, HistogramFactory {\n \n- public static final Factory<Bucket> HISTOGRAM_FACTORY = new Factory<Bucket>();\n static final Type TYPE = new Type(\"histogram\");\n \n public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket {\n \n- final long key;\n+ final double key;\n final long docCount;\n final InternalAggregations aggregations;\n private final transient boolean keyed;\n protected final transient DocValueFormat format;\n- private final Factory<?> factory;\n \n- public Bucket(long key, long docCount, boolean keyed, DocValueFormat format, Factory<?> factory,\n+ public Bucket(double key, long docCount, boolean keyed, DocValueFormat format,\n InternalAggregations aggregations) {\n this.format = format;\n this.keyed = keyed;\n- this.factory = factory;\n this.key = key;\n this.docCount = docCount;\n this.aggregations = aggregations;\n@@ -72,26 +67,21 @@ public Bucket(long key, long docCount, boolean keyed, DocValueFormat format, Fac\n /**\n * Read from a stream.\n */\n- public Bucket(StreamInput in, boolean keyed, DocValueFormat format, Factory<?> factory) throws IOException {\n+ public Bucket(StreamInput in, boolean keyed, DocValueFormat format) throws IOException {\n this.format = format;\n this.keyed = keyed;\n- this.factory = factory;\n- key = in.readLong();\n+ key = in.readDouble();\n docCount = in.readVLong();\n aggregations = InternalAggregations.readAggregations(in);\n }\n \n @Override\n public void writeTo(StreamOutput out) throws IOException {\n- out.writeLong(key);\n+ out.writeDouble(key);\n out.writeVLong(docCount);\n aggregations.writeTo(out);\n }\n \n- protected Factory<?> getFactory() {\n- return factory;\n- }\n-\n @Override\n public String getKeyAsString() {\n return format.format(key);\n@@ -112,16 +102,15 @@ public Aggregations getAggregations() {\n return aggregations;\n }\n \n- @SuppressWarnings(\"unchecked\")\n- <B extends Bucket> B 
reduce(List<B> buckets, ReduceContext context) {\n+ Bucket reduce(List<Bucket> buckets, ReduceContext context) {\n List<InternalAggregations> aggregations = new ArrayList<>(buckets.size());\n long docCount = 0;\n for (Bucket bucket : buckets) {\n docCount += bucket.docCount;\n aggregations.add((InternalAggregations) bucket.getAggregations());\n }\n InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);\n- return (B) getFactory().createBucket(key, docCount, aggs, keyed, format);\n+ return new InternalHistogram.Bucket(key, docCount, keyed, format, aggs);\n }\n \n @Override\n@@ -153,96 +142,40 @@ public boolean getKeyed() {\n \n static class EmptyBucketInfo {\n \n- final Rounding rounding;\n+ final double interval, offset, minBound, maxBound;\n final InternalAggregations subAggregations;\n- final ExtendedBounds bounds;\n-\n- EmptyBucketInfo(Rounding rounding, InternalAggregations subAggregations) {\n- this(rounding, subAggregations, null);\n- }\n \n- EmptyBucketInfo(Rounding rounding, InternalAggregations subAggregations, ExtendedBounds bounds) {\n- this.rounding = rounding;\n+ EmptyBucketInfo(double interval, double offset, double minBound, double maxBound, InternalAggregations subAggregations) {\n+ this.interval = interval;\n+ this.offset = offset;\n+ this.minBound = minBound;\n+ this.maxBound = maxBound;\n this.subAggregations = subAggregations;\n- this.bounds = bounds;\n- }\n-\n- public static EmptyBucketInfo readFrom(StreamInput in) throws IOException {\n- Rounding rounding = Rounding.Streams.read(in);\n- InternalAggregations aggs = InternalAggregations.readAggregations(in);\n- if (in.readBoolean()) {\n- return new EmptyBucketInfo(rounding, aggs, new ExtendedBounds(in));\n- }\n- return new EmptyBucketInfo(rounding, aggs);\n- }\n-\n- public static void writeTo(EmptyBucketInfo info, StreamOutput out) throws IOException {\n- Rounding.Streams.write(info.rounding, out);\n- info.subAggregations.writeTo(out);\n- out.writeBoolean(info.bounds != null);\n- if (info.bounds != null) {\n- info.bounds.writeTo(out);\n- }\n- }\n-\n- }\n-\n- public static class Factory<B extends InternalHistogram.Bucket> {\n-\n- protected Factory() {\n- }\n-\n- public Type type() {\n- return TYPE;\n- }\n-\n- public ValueType valueType() {\n- return ValueType.NUMERIC;\n- }\n-\n- public InternalHistogram<B> create(String name, List<B> buckets, InternalOrder order, long minDocCount,\n- EmptyBucketInfo emptyBucketInfo, DocValueFormat formatter, boolean keyed,\n- List<PipelineAggregator> pipelineAggregators,\n- Map<String, Object> metaData) {\n- return new InternalHistogram<>(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, this, pipelineAggregators,\n- metaData);\n- }\n-\n- public InternalHistogram<B> create(List<B> buckets, InternalHistogram<B> prototype) {\n- return new InternalHistogram<>(prototype.name, buckets, prototype.order, prototype.minDocCount, prototype.emptyBucketInfo,\n- prototype.format, prototype.keyed, this, prototype.pipelineAggregators(), prototype.metaData);\n }\n \n- @SuppressWarnings(\"unchecked\")\n- public B createBucket(InternalAggregations aggregations, B prototype) {\n- return (B) new Bucket(prototype.key, prototype.docCount, prototype.getKeyed(), prototype.format, this, aggregations);\n+ EmptyBucketInfo(StreamInput in) throws IOException {\n+ this(in.readDouble(), in.readDouble(), in.readDouble(), in.readDouble(), InternalAggregations.readAggregations(in));\n }\n \n- @SuppressWarnings(\"unchecked\")\n- public B createBucket(Object key, long docCount, 
InternalAggregations aggregations, boolean keyed, DocValueFormat formatter) {\n- if (key instanceof Number) {\n- return (B) new Bucket(((Number) key).longValue(), docCount, keyed, formatter, this, aggregations);\n- } else {\n- throw new AggregationExecutionException(\"Expected key of type Number but got [\" + key + \"]\");\n- }\n+ public void writeTo(StreamOutput out) throws IOException {\n+ out.writeDouble(interval);\n+ out.writeDouble(offset);\n+ out.writeDouble(minBound);\n+ out.writeDouble(maxBound);\n+ subAggregations.writeTo(out);\n }\n \n- @SuppressWarnings(\"unchecked\")\n- protected B readBucket(StreamInput in, boolean keyed, DocValueFormat format) throws IOException {\n- return (B) new Bucket(in, keyed, format, this);\n- }\n }\n \n- private final List<B> buckets;\n+ private final List<Bucket> buckets;\n private final InternalOrder order;\n private final DocValueFormat format;\n private final boolean keyed;\n private final long minDocCount;\n private final EmptyBucketInfo emptyBucketInfo;\n- private final Factory<B> factory;\n \n- InternalHistogram(String name, List<B> buckets, InternalOrder order, long minDocCount, EmptyBucketInfo emptyBucketInfo,\n- DocValueFormat formatter, boolean keyed, Factory<B> factory, List<PipelineAggregator> pipelineAggregators,\n+ InternalHistogram(String name, List<Bucket> buckets, InternalOrder order, long minDocCount, EmptyBucketInfo emptyBucketInfo,\n+ DocValueFormat formatter, boolean keyed, List<PipelineAggregator> pipelineAggregators,\n Map<String, Object> metaData) {\n super(name, pipelineAggregators, metaData);\n this.buckets = buckets;\n@@ -252,45 +185,31 @@ protected B readBucket(StreamInput in, boolean keyed, DocValueFormat format) thr\n this.emptyBucketInfo = emptyBucketInfo;\n this.format = formatter;\n this.keyed = keyed;\n- this.factory = factory;\n }\n \n /**\n * Stream from a stream.\n */\n public InternalHistogram(StreamInput in) throws IOException {\n super(in);\n- factory = resolveFactory(in.readString());\n order = InternalOrder.Streams.readOrder(in);\n minDocCount = in.readVLong();\n if (minDocCount == 0) {\n- emptyBucketInfo = EmptyBucketInfo.readFrom(in);\n+ emptyBucketInfo = new EmptyBucketInfo(in);\n } else {\n emptyBucketInfo = null;\n }\n format = in.readNamedWriteable(DocValueFormat.class);\n keyed = in.readBoolean();\n- buckets = in.readList(stream -> factory.readBucket(stream, keyed, format));\n- }\n-\n- @SuppressWarnings(\"unchecked\")\n- protected static <B extends InternalHistogram.Bucket> Factory<B> resolveFactory(String factoryType) {\n- if (factoryType.equals(InternalDateHistogram.TYPE.name())) {\n- return (Factory<B>) new InternalDateHistogram.Factory();\n- } else if (factoryType.equals(TYPE.name())) {\n- return new Factory<>();\n- } else {\n- throw new IllegalStateException(\"Invalid histogram factory type [\" + factoryType + \"]\");\n- }\n+ buckets = in.readList(stream -> new Bucket(stream, keyed, format));\n }\n \n @Override\n protected void doWriteTo(StreamOutput out) throws IOException {\n- out.writeString(factory.type().name());\n InternalOrder.Streams.writeOrder(order, out);\n out.writeVLong(minDocCount);\n if (minDocCount == 0) {\n- EmptyBucketInfo.writeTo(emptyBucketInfo, out);\n+ emptyBucketInfo.writeTo(out);\n }\n out.writeNamedWriteable(format);\n out.writeBoolean(keyed);\n@@ -303,68 +222,59 @@ public String getWriteableName() {\n }\n \n @Override\n- public List<B> getBuckets() {\n- return buckets;\n- }\n-\n- public Factory<B> getFactory() {\n- return factory;\n- }\n-\n- public Rounding getRounding() 
{\n- return emptyBucketInfo.rounding;\n+ public List<Histogram.Bucket> getBuckets() {\n+ return Collections.unmodifiableList(buckets);\n }\n \n @Override\n- public InternalHistogram<B> create(List<B> buckets) {\n- return getFactory().create(buckets, this);\n+ public InternalHistogram create(List<Bucket> buckets) {\n+ return new InternalHistogram(name, buckets, order, minDocCount, emptyBucketInfo, format, keyed, pipelineAggregators(), metaData);\n }\n \n @Override\n- public B createBucket(InternalAggregations aggregations, B prototype) {\n- return getFactory().createBucket(aggregations, prototype);\n+ public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {\n+ return new Bucket(prototype.key, prototype.docCount, prototype.keyed, prototype.format, aggregations);\n }\n \n- private static class IteratorAndCurrent<B> {\n+ private static class IteratorAndCurrent {\n \n- private final Iterator<B> iterator;\n- private B current;\n+ private final Iterator<Bucket> iterator;\n+ private Bucket current;\n \n- IteratorAndCurrent(Iterator<B> iterator) {\n+ IteratorAndCurrent(Iterator<Bucket> iterator) {\n this.iterator = iterator;\n current = iterator.next();\n }\n \n }\n \n- private List<B> reduceBuckets(List<InternalAggregation> aggregations, ReduceContext reduceContext) {\n+ private List<Bucket> reduceBuckets(List<InternalAggregation> aggregations, ReduceContext reduceContext) {\n \n- final PriorityQueue<IteratorAndCurrent<B>> pq = new PriorityQueue<IteratorAndCurrent<B>>(aggregations.size()) {\n+ final PriorityQueue<IteratorAndCurrent> pq = new PriorityQueue<IteratorAndCurrent>(aggregations.size()) {\n @Override\n- protected boolean lessThan(IteratorAndCurrent<B> a, IteratorAndCurrent<B> b) {\n+ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) {\n return a.current.key < b.current.key;\n }\n };\n for (InternalAggregation aggregation : aggregations) {\n- @SuppressWarnings(\"unchecked\")\n- InternalHistogram<B> histogram = (InternalHistogram<B>) aggregation;\n+ InternalHistogram histogram = (InternalHistogram) aggregation;\n if (histogram.buckets.isEmpty() == false) {\n- pq.add(new IteratorAndCurrent<>(histogram.buckets.iterator()));\n+ pq.add(new IteratorAndCurrent(histogram.buckets.iterator()));\n }\n }\n \n- List<B> reducedBuckets = new ArrayList<>();\n+ List<Bucket> reducedBuckets = new ArrayList<>();\n if (pq.size() > 0) {\n // list of buckets coming from different shards that have the same key\n- List<B> currentBuckets = new ArrayList<>();\n- long key = pq.top().current.key;\n+ List<Bucket> currentBuckets = new ArrayList<>();\n+ double key = pq.top().current.key;\n \n do {\n- final IteratorAndCurrent<B> top = pq.top();\n+ final IteratorAndCurrent top = pq.top();\n \n if (top.current.key != key) {\n // the key changes, reduce what we already buffered and reset the buffer for current buckets\n- final B reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);\n+ final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);\n if (reduced.getDocCount() >= minDocCount) {\n reducedBuckets.add(reduced);\n }\n@@ -375,7 +285,7 @@ protected boolean lessThan(IteratorAndCurrent<B> a, IteratorAndCurrent<B> b) {\n currentBuckets.add(top.current);\n \n if (top.iterator.hasNext()) {\n- final B next = top.iterator.next();\n+ final Bucket next = top.iterator.next();\n assert next.key > top.current.key : \"shards must return data sorted by key\";\n top.current = next;\n pq.updateTop();\n@@ -385,7 +295,7 @@ protected boolean 
lessThan(IteratorAndCurrent<B> a, IteratorAndCurrent<B> b) {\n } while (pq.size() > 0);\n \n if (currentBuckets.isEmpty() == false) {\n- final B reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);\n+ final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);\n if (reduced.getDocCount() >= minDocCount) {\n reducedBuckets.add(reduced);\n }\n@@ -395,75 +305,62 @@ protected boolean lessThan(IteratorAndCurrent<B> a, IteratorAndCurrent<B> b) {\n return reducedBuckets;\n }\n \n- private void addEmptyBuckets(List<B> list, ReduceContext reduceContext) {\n- B lastBucket = null;\n- ExtendedBounds bounds = emptyBucketInfo.bounds;\n- ListIterator<B> iter = list.listIterator();\n+ private double nextKey(double key) {\n+ return round(key + emptyBucketInfo.interval + emptyBucketInfo.interval / 2);\n+ }\n+\n+ private double round(double key) {\n+ return Math.floor((key - emptyBucketInfo.offset) / emptyBucketInfo.interval) * emptyBucketInfo.interval + emptyBucketInfo.offset;\n+ }\n+\n+ private void addEmptyBuckets(List<Bucket> list, ReduceContext reduceContext) {\n+ ListIterator<Bucket> iter = list.listIterator();\n \n // first adding all the empty buckets *before* the actual data (based on th extended_bounds.min the user requested)\n- InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce(Collections.singletonList(emptyBucketInfo.subAggregations),\n+ InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce(\n+ Collections.singletonList(emptyBucketInfo.subAggregations),\n reduceContext);\n- if (bounds != null) {\n- B firstBucket = iter.hasNext() ? list.get(iter.nextIndex()) : null;\n- if (firstBucket == null) {\n- if (bounds.getMin() != null && bounds.getMax() != null) {\n- long key = bounds.getMin();\n- long max = bounds.getMax();\n- while (key <= max) {\n- iter.add(getFactory().createBucket(key, 0,\n- reducedEmptySubAggs,\n- keyed, format));\n- key = emptyBucketInfo.rounding.nextRoundingValue(key);\n- }\n- }\n- } else {\n- if (bounds.getMin() != null) {\n- long key = bounds.getMin();\n- if (key < firstBucket.key) {\n- while (key < firstBucket.key) {\n- iter.add(getFactory().createBucket(key, 0,\n- reducedEmptySubAggs,\n- keyed, format));\n- key = emptyBucketInfo.rounding.nextRoundingValue(key);\n- }\n- }\n+\n+ if (iter.hasNext() == false) {\n+ // fill with empty buckets\n+ for (double key = round(emptyBucketInfo.minBound); key <= emptyBucketInfo.maxBound; key = nextKey(key)) {\n+ iter.add(new Bucket(key, 0, keyed, format, reducedEmptySubAggs));\n+ }\n+ } else {\n+ Bucket first = list.get(iter.nextIndex());\n+ if (Double.isFinite(emptyBucketInfo.minBound)) {\n+ // fill with empty buckets until the first key\n+ for (double key = round(emptyBucketInfo.minBound); key < first.key; key = nextKey(key)) {\n+ iter.add(new Bucket(key, 0, keyed, format, reducedEmptySubAggs));\n }\n }\n- }\n \n- // now adding the empty buckets within the actual data,\n- // e.g. if the data series is [1,2,3,7] there're 3 empty buckets that will be created for 4,5,6\n- while (iter.hasNext()) {\n- B nextBucket = list.get(iter.nextIndex());\n- if (lastBucket != null) {\n- long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key);\n- while (key < nextBucket.key) {\n- iter.add(getFactory().createBucket(key, 0,\n- reducedEmptySubAggs, keyed,\n- format));\n- key = emptyBucketInfo.rounding.nextRoundingValue(key);\n+ // now adding the empty buckets within the actual data,\n+ // e.g. 
if the data series is [1,2,3,7] there're 3 empty buckets that will be created for 4,5,6\n+ Bucket lastBucket = null;\n+ do {\n+ Bucket nextBucket = list.get(iter.nextIndex());\n+ if (lastBucket != null) {\n+ double key = nextKey(lastBucket.key);\n+ while (key < nextBucket.key) {\n+ iter.add(new Bucket(key, 0, keyed, format, reducedEmptySubAggs));\n+ key = nextKey(key);\n+ }\n+ assert key == nextBucket.key;\n }\n- assert key == nextBucket.key;\n- }\n- lastBucket = iter.next();\n- }\n+ lastBucket = iter.next();\n+ } while (iter.hasNext());\n \n- // finally, adding the empty buckets *after* the actual data (based on the extended_bounds.max requested by the user)\n- if (bounds != null && lastBucket != null && bounds.getMax() != null && bounds.getMax() > lastBucket.key) {\n- long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key);\n- long max = bounds.getMax();\n- while (key <= max) {\n- iter.add(getFactory().createBucket(key, 0,\n- reducedEmptySubAggs, keyed,\n- format));\n- key = emptyBucketInfo.rounding.nextRoundingValue(key);\n+ // finally, adding the empty buckets *after* the actual data (based on the extended_bounds.max requested by the user)\n+ for (double key = nextKey(lastBucket.key); key <= emptyBucketInfo.maxBound; key = nextKey(key)) {\n+ iter.add(new Bucket(key, 0, keyed, format, reducedEmptySubAggs));\n }\n }\n }\n \n @Override\n public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {\n- List<B> reducedBuckets = reduceBuckets(aggregations, reduceContext);\n+ List<Bucket> reducedBuckets = reduceBuckets(aggregations, reduceContext);\n \n // adding empty buckets if needed\n if (minDocCount == 0) {\n@@ -476,15 +373,15 @@ public InternalAggregation doReduce(List<InternalAggregation> aggregations, Redu\n // maintains order\n } else if (order == InternalOrder.KEY_DESC) {\n // we just need to reverse here...\n- List<B> reverse = new ArrayList<>(reducedBuckets);\n+ List<Bucket> reverse = new ArrayList<>(reducedBuckets);\n Collections.reverse(reverse);\n reducedBuckets = reverse;\n } else {\n // sorted by sub-aggregation, need to fall back to a costly n*log(n) sort\n CollectionUtil.introSort(reducedBuckets, order.comparator());\n }\n \n- return getFactory().create(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo, format, keyed, pipelineAggregators(),\n+ return new InternalHistogram(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo, format, keyed, pipelineAggregators(),\n getMetaData());\n }\n \n@@ -495,7 +392,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th\n } else {\n builder.startArray(CommonFields.BUCKETS);\n }\n- for (B bucket : buckets) {\n+ for (Bucket bucket : buckets) {\n bucket.toXContent(builder, params);\n }\n if (keyed) {\n@@ -506,4 +403,33 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th\n return builder;\n }\n \n+ // HistogramFactory method impls\n+\n+ @Override\n+ public Number getKey(MultiBucketsAggregation.Bucket bucket) {\n+ return ((Bucket) bucket).key;\n+ }\n+\n+ @Override\n+ public Number nextKey(Number key) {\n+ return nextKey(key.doubleValue());\n+ }\n+\n+ @Override\n+ public InternalAggregation createAggregation(List<MultiBucketsAggregation.Bucket> buckets) {\n+ // convert buckets to the right type\n+ List<Bucket> buckets2 = new ArrayList<>(buckets.size());\n+ for (Object b : buckets) {\n+ buckets2.add((Bucket) b);\n+ }\n+ buckets2 = Collections.unmodifiableList(buckets2);\n+ return new 
InternalHistogram(name, buckets2, order, minDocCount, emptyBucketInfo, format,\n+ keyed, pipelineAggregators(), getMetaData());\n+ }\n+\n+ @Override\n+ public Bucket createBucket(Number key, long docCount, InternalAggregations aggregations) {\n+ return new Bucket(key.doubleValue(), docCount, keyed, format, aggregations);\n+ }\n+\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java", "status": "modified" }, { "diff": "@@ -35,9 +35,9 @@ class InternalOrder extends Histogram.Order {\n final byte id;\n final String key;\n final boolean asc;\n- final Comparator<InternalHistogram.Bucket> comparator;\n+ final Comparator<Histogram.Bucket> comparator;\n \n- InternalOrder(byte id, String key, boolean asc, Comparator<InternalHistogram.Bucket> comparator) {\n+ InternalOrder(byte id, String key, boolean asc, Comparator<Histogram.Bucket> comparator) {\n this.id = id;\n this.key = key;\n this.asc = asc;\n@@ -57,7 +57,7 @@ boolean asc() {\n }\n \n @Override\n- Comparator<InternalHistogram.Bucket> comparator() {\n+ Comparator<Histogram.Bucket> comparator() {\n return comparator;\n }\n \n@@ -90,11 +90,7 @@ static class Aggregation extends InternalOrder {\n static final byte ID = 0;\n \n Aggregation(String key, boolean asc) {\n- super(ID, key, asc, new MultiBucketsAggregation.Bucket.SubAggregationComparator<InternalHistogram.Bucket>(key, asc));\n- }\n-\n- private static String key(String aggName, String valueName) {\n- return (valueName == null) ? aggName : aggName + \".\" + valueName;\n+ super(ID, key, asc, new MultiBucketsAggregation.Bucket.SubAggregationComparator<Histogram.Bucket>(key, asc));\n }\n \n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalOrder.java", "status": "modified" }, { "diff": "@@ -28,6 +28,7 @@\n import org.elasticsearch.search.aggregations.AggregationExecutionException;\n import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;\n import org.elasticsearch.search.aggregations.InvalidAggregationPathException;\n+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;\n import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation;\n import org.elasticsearch.search.aggregations.support.AggregationPath;\n \n@@ -145,13 +146,13 @@ public String getName() {\n * @return The value extracted from <code>bucket</code> found at\n * <code>aggPath</code>\n */\n- public static Double resolveBucketValue(InternalMultiBucketAggregation<?, ? extends InternalMultiBucketAggregation.Bucket> agg,\n+ public static Double resolveBucketValue(MultiBucketsAggregation agg,\n InternalMultiBucketAggregation.Bucket bucket, String aggPath, GapPolicy gapPolicy) {\n List<String> aggPathsList = AggregationPath.parse(aggPath).getPathElementsAsStringList();\n return resolveBucketValue(agg, bucket, aggPathsList, gapPolicy);\n }\n \n- public static Double resolveBucketValue(InternalMultiBucketAggregation<?, ? 
extends InternalMultiBucketAggregation.Bucket> agg,\n+ public static Double resolveBucketValue(MultiBucketsAggregation agg,\n InternalMultiBucketAggregation.Bucket bucket, List<String> aggPathAsList, GapPolicy gapPolicy) {\n try {\n Object propertyValue = bucket.getProperty(agg.getName(), aggPathAsList);", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpers.java", "status": "modified" }, { "diff": "@@ -29,7 +29,8 @@\n import org.elasticsearch.search.DocValueFormat;\n import org.elasticsearch.search.aggregations.AggregatorFactory;\n import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;\n-import org.elasticsearch.search.aggregations.bucket.histogram.AbstractHistogramAggregatorFactory;\n+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregatorFactory;\n+import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregatorFactory;\n import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder;\n import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;\n import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsParser;\n@@ -104,15 +105,21 @@ public void doValidate(AggregatorFactory<?> parent, AggregatorFactory<?>[] aggFa\n throw new IllegalStateException(BUCKETS_PATH.getPreferredName()\n + \" must contain a single entry for aggregation [\" + name + \"]\");\n }\n- if (!(parent instanceof AbstractHistogramAggregatorFactory<?>)) {\n- throw new IllegalStateException(\"cumulative sum aggregation [\" + name\n- + \"] must have a histogram or date_histogram as parent\");\n- } else {\n- AbstractHistogramAggregatorFactory<?> histoParent = (AbstractHistogramAggregatorFactory<?>) parent;\n+ if (parent instanceof HistogramAggregatorFactory) {\n+ HistogramAggregatorFactory histoParent = (HistogramAggregatorFactory) parent;\n+ if (histoParent.minDocCount() != 0) {\n+ throw new IllegalStateException(\"parent histogram of cumulative sum aggregation [\" + name\n+ + \"] must have min_doc_count of 0\");\n+ }\n+ } else if (parent instanceof DateHistogramAggregatorFactory) {\n+ DateHistogramAggregatorFactory histoParent = (DateHistogramAggregatorFactory) parent;\n if (histoParent.minDocCount() != 0) {\n throw new IllegalStateException(\"parent histogram of cumulative sum aggregation [\" + name\n + \"] must have min_doc_count of 0\");\n }\n+ } else {\n+ throw new IllegalStateException(\"cumulative sum aggregation [\" + name\n+ + \"] must have a histogram or date_histogram as parent\");\n }\n }\n ", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/pipeline/cumulativesum/CumulativeSumPipelineAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -25,7 +25,9 @@\n import org.elasticsearch.search.aggregations.InternalAggregation;\n import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;\n import org.elasticsearch.search.aggregations.InternalAggregations;\n-import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;\n+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;\n+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;\n+import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory;\n import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;\n import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue;\n import 
org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;\n@@ -68,23 +70,22 @@ public String getWriteableName() {\n \n @Override\n public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {\n- InternalHistogram histo = (InternalHistogram) aggregation;\n- List<? extends InternalHistogram.Bucket> buckets = histo.getBuckets();\n- InternalHistogram.Factory<? extends InternalHistogram.Bucket> factory = histo.getFactory();\n+ MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;\n+ List<? extends Bucket> buckets = histo.getBuckets();\n+ HistogramFactory factory = (HistogramFactory) histo;\n \n- List newBuckets = new ArrayList<>();\n+ List<Bucket> newBuckets = new ArrayList<>();\n double sum = 0;\n- for (InternalHistogram.Bucket bucket : buckets) {\n+ for (Bucket bucket : buckets) {\n Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], GapPolicy.INSERT_ZEROS);\n sum += thisBucketValue;\n List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> {\n return (InternalAggregation) p;\n }).collect(Collectors.toList());\n aggs.add(new InternalSimpleValue(name(), sum, formatter, new ArrayList<PipelineAggregator>(), metaData()));\n- InternalHistogram.Bucket newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(),\n- new InternalAggregations(aggs), bucket.getKeyed(), bucket.getFormatter());\n+ Bucket newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs));\n newBuckets.add(newBucket);\n }\n- return factory.create(newBuckets, histo);\n+ return factory.createAggregation(newBuckets);\n }\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/pipeline/cumulativesum/CumulativeSumPipelineAggregator.java", "status": "modified" }, { "diff": "@@ -31,9 +31,9 @@\n import org.elasticsearch.search.DocValueFormat;\n import org.elasticsearch.search.aggregations.AggregatorFactory;\n import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;\n-import org.elasticsearch.search.aggregations.bucket.histogram.AbstractHistogramAggregatorFactory;\n import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregatorFactory;\n import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;\n+import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregatorFactory;\n import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder;\n import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;\n import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;\n@@ -162,15 +162,21 @@ public void doValidate(AggregatorFactory<?> parent, AggregatorFactory<?>[] aggFa\n throw new IllegalStateException(PipelineAggregator.Parser.BUCKETS_PATH.getPreferredName()\n + \" must contain a single entry for aggregation [\" + name + \"]\");\n }\n- if (!(parent instanceof AbstractHistogramAggregatorFactory<?>)) {\n- throw new IllegalStateException(\"derivative aggregation [\" + name\n- + \"] must have a histogram or date_histogram as parent\");\n- } else {\n- AbstractHistogramAggregatorFactory<?> histoParent = (AbstractHistogramAggregatorFactory<?>) parent;\n+ if (parent instanceof HistogramAggregatorFactory) {\n+ HistogramAggregatorFactory histoParent = (HistogramAggregatorFactory) parent;\n+ if (histoParent.minDocCount() != 0) {\n+ throw new IllegalStateException(\"parent histogram of derivative 
aggregation [\" + name\n+ + \"] must have min_doc_count of 0\");\n+ }\n+ } else if (parent instanceof DateHistogramAggregatorFactory) {\n+ DateHistogramAggregatorFactory histoParent = (DateHistogramAggregatorFactory) parent;\n if (histoParent.minDocCount() != 0) {\n throw new IllegalStateException(\"parent histogram of derivative aggregation [\" + name\n + \"] must have min_doc_count of 0\");\n }\n+ } else {\n+ throw new IllegalStateException(\"derivative aggregation [\" + name\n+ + \"] must have a histogram or date_histogram as parent\");\n }\n }\n ", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -22,14 +22,14 @@\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n import org.elasticsearch.search.DocValueFormat;\n-import org.elasticsearch.search.aggregations.AggregationExecutionException;\n import org.elasticsearch.search.aggregations.InternalAggregation;\n import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;\n import org.elasticsearch.search.aggregations.InternalAggregations;\n-import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;\n+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;\n+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;\n+import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory;\n import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;\n import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;\n-import org.joda.time.DateTime;\n \n import java.io.IOException;\n import java.util.ArrayList;\n@@ -77,47 +77,35 @@ public String getWriteableName() {\n \n @Override\n public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {\n- InternalHistogram histo = (InternalHistogram) aggregation;\n- List<? extends InternalHistogram.Bucket> buckets = histo.getBuckets();\n- InternalHistogram.Factory<? extends InternalHistogram.Bucket> factory = histo.getFactory();\n+ MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;\n+ List<? 
extends Bucket> buckets = histo.getBuckets();\n+ HistogramFactory factory = (HistogramFactory) histo;\n \n- List newBuckets = new ArrayList<>();\n- Long lastBucketKey = null;\n+ List<Bucket> newBuckets = new ArrayList<>();\n+ Number lastBucketKey = null;\n Double lastBucketValue = null;\n- for (InternalHistogram.Bucket bucket : buckets) {\n- Long thisBucketKey = resolveBucketKeyAsLong(bucket);\n+ for (Bucket bucket : buckets) {\n+ Number thisBucketKey = factory.getKey(bucket);\n Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy);\n if (lastBucketValue != null && thisBucketValue != null) {\n double gradient = thisBucketValue - lastBucketValue;\n double xDiff = -1;\n if (xAxisUnits != null) {\n- xDiff = (thisBucketKey - lastBucketKey) / xAxisUnits;\n+ xDiff = (thisBucketKey.doubleValue() - lastBucketKey.doubleValue()) / xAxisUnits;\n }\n final List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> {\n return (InternalAggregation) p;\n }).collect(Collectors.toList());\n aggs.add(new InternalDerivative(name(), gradient, xDiff, formatter, new ArrayList<PipelineAggregator>(), metaData()));\n- InternalHistogram.Bucket newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(), new InternalAggregations(\n- aggs), bucket.getKeyed(), bucket.getFormatter());\n+ Bucket newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs));\n newBuckets.add(newBucket);\n } else {\n newBuckets.add(bucket);\n }\n lastBucketKey = thisBucketKey;\n lastBucketValue = thisBucketValue;\n }\n- return factory.create(newBuckets, histo);\n+ return factory.createAggregation(newBuckets);\n }\n \n- private Long resolveBucketKeyAsLong(InternalHistogram.Bucket bucket) {\n- Object key = bucket.getKey();\n- if (key instanceof DateTime) {\n- return ((DateTime) key).getMillis();\n- } else if (key instanceof Number) {\n- return ((Number) key).longValue();\n- } else {\n- throw new AggregationExecutionException(\"InternalBucket keys must be either a Number or a DateTime for aggregation \" + name()\n- + \". 
Found bucket with key \" + key);\n- }\n- }\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregator.java", "status": "modified" }, { "diff": "@@ -30,7 +30,8 @@\n import org.elasticsearch.search.DocValueFormat;\n import org.elasticsearch.search.aggregations.AggregatorFactory;\n import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;\n-import org.elasticsearch.search.aggregations.bucket.histogram.AbstractHistogramAggregatorFactory;\n+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregatorFactory;\n+import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregatorFactory;\n import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder;\n import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;\n import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;\n@@ -267,15 +268,21 @@ public void doValidate(AggregatorFactory<?> parent, AggregatorFactory<?>[] aggFa\n throw new IllegalStateException(PipelineAggregator.Parser.BUCKETS_PATH.getPreferredName()\n + \" must contain a single entry for aggregation [\" + name + \"]\");\n }\n- if (!(parent instanceof AbstractHistogramAggregatorFactory<?>)) {\n- throw new IllegalStateException(\"moving average aggregation [\" + name\n- + \"] must have a histogram or date_histogram as parent\");\n- } else {\n- AbstractHistogramAggregatorFactory<?> histoParent = (AbstractHistogramAggregatorFactory<?>) parent;\n+ if (parent instanceof HistogramAggregatorFactory) {\n+ HistogramAggregatorFactory histoParent = (HistogramAggregatorFactory) parent;\n+ if (histoParent.minDocCount() != 0) {\n+ throw new IllegalStateException(\"parent histogram of moving average aggregation [\" + name\n+ + \"] must have min_doc_count of 0\");\n+ }\n+ } else if (parent instanceof DateHistogramAggregatorFactory) {\n+ DateHistogramAggregatorFactory histoParent = (DateHistogramAggregatorFactory) parent;\n if (histoParent.minDocCount() != 0) {\n throw new IllegalStateException(\"parent histogram of moving average aggregation [\" + name\n + \"] must have min_doc_count of 0\");\n }\n+ } else {\n+ throw new IllegalStateException(\"moving average aggregation [\" + name\n+ + \"] must have a histogram or date_histogram as parent\");\n }\n }\n ", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -23,16 +23,16 @@\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n import org.elasticsearch.search.DocValueFormat;\n-import org.elasticsearch.search.aggregations.AggregationExecutionException;\n import org.elasticsearch.search.aggregations.InternalAggregation;\n import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;\n import org.elasticsearch.search.aggregations.InternalAggregations;\n-import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;\n+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;\n+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;\n+import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory;\n import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;\n import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue;\n import 
org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;\n import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModel;\n-import org.joda.time.DateTime;\n \n import java.io.IOException;\n import java.util.ArrayList;\n@@ -93,14 +93,14 @@ public String getWriteableName() {\n \n @Override\n public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {\n- InternalHistogram histo = (InternalHistogram) aggregation;\n- List<? extends InternalHistogram.Bucket> buckets = histo.getBuckets();\n- InternalHistogram.Factory<? extends InternalHistogram.Bucket> factory = histo.getFactory();\n+ MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;\n+ List<? extends Bucket> buckets = histo.getBuckets();\n+ HistogramFactory factory = (HistogramFactory) histo;\n \n- List newBuckets = new ArrayList<>();\n+ List<Bucket> newBuckets = new ArrayList<>();\n EvictingQueue<Double> values = new EvictingQueue<>(this.window);\n \n- long lastValidKey = 0;\n+ Number lastValidKey = 0;\n int lastValidPosition = 0;\n int counter = 0;\n \n@@ -110,12 +110,12 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext\n model = minimize(buckets, histo, model);\n }\n \n- for (InternalHistogram.Bucket bucket : buckets) {\n+ for (Bucket bucket : buckets) {\n Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy);\n \n // Default is to reuse existing bucket. Simplifies the rest of the logic,\n // since we only change newBucket if we can add to it\n- InternalHistogram.Bucket newBucket = bucket;\n+ Bucket newBucket = bucket;\n \n if (!(thisBucketValue == null || thisBucketValue.equals(Double.NaN))) {\n \n@@ -127,18 +127,11 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext\n return (InternalAggregation) p;\n }).collect(Collectors.toList());\n aggs.add(new InternalSimpleValue(name(), movavg, formatter, new ArrayList<PipelineAggregator>(), metaData()));\n- newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(), new InternalAggregations(\n- aggs), bucket.getKeyed(), bucket.getFormatter());\n+ newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs));\n }\n \n if (predict > 0) {\n- if (bucket.getKey() instanceof Number) {\n- lastValidKey = ((Number) bucket.getKey()).longValue();\n- } else if (bucket.getKey() instanceof DateTime) {\n- lastValidKey = ((DateTime) bucket.getKey()).getMillis();\n- } else {\n- throw new AggregationExecutionException(\"Expected key of type Number or DateTime but got [\" + lastValidKey + \"]\");\n- }\n+ lastValidKey = factory.getKey(bucket);\n lastValidPosition = counter;\n }\n \n@@ -150,29 +143,22 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext\n }\n \n if (buckets.size() > 0 && predict > 0) {\n-\n- boolean keyed;\n- DocValueFormat formatter;\n- keyed = buckets.get(0).getKeyed();\n- formatter = buckets.get(0).getFormatter();\n-\n double[] predictions = model.predict(values, predict);\n for (int i = 0; i < predictions.length; i++) {\n \n List<InternalAggregation> aggs;\n- long newKey = histo.getRounding().nextRoundingValue(lastValidKey);\n+ Number newKey = factory.nextKey(lastValidKey);\n \n if (lastValidPosition + i + 1 < newBuckets.size()) {\n- InternalHistogram.Bucket bucket = (InternalHistogram.Bucket) newBuckets.get(lastValidPosition + i + 1);\n+ Bucket bucket = newBuckets.get(lastValidPosition + i + 1);\n \n // Get the existing aggs 
in the bucket so we don't clobber data\n aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> {\n return (InternalAggregation) p;\n }).collect(Collectors.toList());\n aggs.add(new InternalSimpleValue(name(), predictions[i], formatter, new ArrayList<PipelineAggregator>(), metaData()));\n \n- InternalHistogram.Bucket newBucket = factory.createBucket(newKey, 0, new InternalAggregations(\n- aggs), keyed, formatter);\n+ Bucket newBucket = factory.createBucket(newKey, 0, new InternalAggregations(aggs));\n \n // Overwrite the existing bucket with the new version\n newBuckets.set(lastValidPosition + i + 1, newBucket);\n@@ -182,8 +168,7 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext\n aggs = new ArrayList<>();\n aggs.add(new InternalSimpleValue(name(), predictions[i], formatter, new ArrayList<PipelineAggregator>(), metaData()));\n \n- InternalHistogram.Bucket newBucket = factory.createBucket(newKey, 0, new InternalAggregations(\n- aggs), keyed, formatter);\n+ Bucket newBucket = factory.createBucket(newKey, 0, new InternalAggregations(aggs));\n \n // Since this is a new bucket, simply append it\n newBuckets.add(newBucket);\n@@ -192,16 +177,16 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext\n }\n }\n \n- return factory.create(newBuckets, histo);\n+ return factory.createAggregation(newBuckets);\n }\n \n- private MovAvgModel minimize(List<? extends InternalHistogram.Bucket> buckets, InternalHistogram histo, MovAvgModel model) {\n+ private MovAvgModel minimize(List<? extends Bucket> buckets, MultiBucketsAggregation histo, MovAvgModel model) {\n \n int counter = 0;\n EvictingQueue<Double> values = new EvictingQueue<>(this.window);\n \n double[] test = new double[window];\n- ListIterator<? extends InternalHistogram.Bucket> iter = buckets.listIterator(buckets.size());\n+ ListIterator<? extends Bucket> iter = buckets.listIterator(buckets.size());\n \n // We have to walk the iterator backwards because we don't know if/how many buckets are empty.\n while (iter.hasPrevious() && counter < window) {", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregator.java", "status": "modified" }, { "diff": "@@ -26,8 +26,10 @@\n import org.elasticsearch.search.DocValueFormat;\n import org.elasticsearch.search.aggregations.InternalAggregation;\n import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;\n+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;\n+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;\n+import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory;\n import org.elasticsearch.search.aggregations.InternalAggregations;\n-import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;\n import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;\n import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue;\n import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;\n@@ -78,17 +80,17 @@ public String getWriteableName() {\n \n @Override\n public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {\n- InternalHistogram histo = (InternalHistogram) aggregation;\n- List<? extends InternalHistogram.Bucket> buckets = histo.getBuckets();\n- InternalHistogram.Factory<? 
extends InternalHistogram.Bucket> factory = histo.getFactory();\n+ MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;\n+ List<? extends Bucket> buckets = histo.getBuckets();\n+ HistogramFactory factory = (HistogramFactory) histo;\n \n- List newBuckets = new ArrayList<>();\n+ List<Bucket> newBuckets = new ArrayList<>();\n EvictingQueue<Double> lagWindow = new EvictingQueue<>(lag);\n int counter = 0;\n \n- for (InternalHistogram.Bucket bucket : buckets) {\n+ for (Bucket bucket : buckets) {\n Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy);\n- InternalHistogram.Bucket newBucket = bucket;\n+ Bucket newBucket = bucket;\n \n counter += 1;\n \n@@ -113,15 +115,14 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext\n return (InternalAggregation) p;\n }).collect(Collectors.toList());\n aggs.add(new InternalSimpleValue(name(), diff, formatter, new ArrayList<PipelineAggregator>(), metaData()));\n- newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(), new InternalAggregations(\n- aggs), bucket.getKeyed(), bucket.getFormatter());\n+ newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs));\n }\n \n \n newBuckets.add(newBucket);\n lagWindow.add(thisBucketValue);\n \n }\n- return factory.create(newBuckets, histo);\n+ return factory.createAggregation(newBuckets);\n }\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregator.java", "status": "modified" }, { "diff": "@@ -33,10 +33,8 @@ public void testInterval() {\n int interval = 10;\n Rounding.Interval rounding = new Rounding.Interval(interval);\n int value = 24;\n- final long key = rounding.roundKey(24);\n final long r = rounding.round(24);\n String message = \"round(\" + value + \", interval=\" + interval + \") = \" + r;\n- assertEquals(value/interval, key);\n assertEquals(value/interval * interval, r);\n assertEquals(message, 0, r % interval);\n }\n@@ -46,13 +44,11 @@ public void testIntervalRandom() {\n Rounding.Interval rounding = new Rounding.Interval(interval);\n for (int i = 0; i < 1000; ++i) {\n long l = Math.max(randomLong(), Long.MIN_VALUE + interval);\n- final long key = rounding.roundKey(l);\n final long r = rounding.round(l);\n String message = \"round(\" + l + \", interval=\" + interval + \") = \" + r;\n assertEquals(message, 0, r % interval);\n assertThat(message, r, lessThanOrEqualTo(l));\n assertThat(message, r + interval, greaterThan(l));\n- assertEquals(message, r, key*interval);\n }\n }\n \n@@ -65,15 +61,11 @@ public void testOffsetRounding() {\n final long interval = 10;\n final long offset = 7;\n Rounding.OffsetRounding rounding = new Rounding.OffsetRounding(new Rounding.Interval(interval), offset);\n- assertEquals(-1, rounding.roundKey(6));\n assertEquals(-3, rounding.round(6));\n assertEquals(7, rounding.nextRoundingValue(-3));\n- assertEquals(0, rounding.roundKey(7));\n assertEquals(7, rounding.round(7));\n assertEquals(17, rounding.nextRoundingValue(7));\n- assertEquals(0, rounding.roundKey(16));\n assertEquals(7, rounding.round(16));\n- assertEquals(1, rounding.roundKey(17));\n assertEquals(17, rounding.round(17));\n assertEquals(27, rounding.nextRoundingValue(17));\n }\n@@ -89,13 +81,10 @@ public void testOffsetRoundingRandom() {\n Rounding.OffsetRounding rounding = new Rounding.OffsetRounding(internalRounding, offset);\n long safetyMargin = Math.abs(interval) + Math.abs(offset); // to prevent range 
overflow\n long value = Math.max(randomLong() - safetyMargin, Long.MIN_VALUE + safetyMargin);\n- final long key = rounding.roundKey(value);\n- final long key_next = rounding.roundKey(value + interval);\n final long r_value = rounding.round(value);\n final long nextRoundingValue = rounding.nextRoundingValue(r_value);\n assertThat(\"Rounding should be idempotent\", r_value, equalTo(rounding.round(r_value)));\n assertThat(\"Rounded value smaller than unrounded, regardless of offset\", r_value - offset, lessThanOrEqualTo(value - offset));\n- assertThat(\"Key and next_key should differ by one\", key_next - key, equalTo(1L));\n assertThat(\"Rounded value <= value < next interval start\", r_value + interval, greaterThan(value));\n assertThat(\"NextRounding value should be interval from rounded value\", r_value + interval, equalTo(nextRoundingValue));\n }", "filename": "core/src/test/java/org/elasticsearch/common/rounding/RoundingTests.java", "status": "modified" }, { "diff": "@@ -65,9 +65,6 @@ public void testUTCIntervalRounding() {\n Rounding tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(12)).build();\n DateTimeZone tz = DateTimeZone.UTC;\n assertThat(tzRounding.round(time(\"2009-02-03T01:01:01\")), isDate(time(\"2009-02-03T00:00:00.000Z\"), tz));\n- long roundKey = tzRounding.roundKey(time(\"2009-02-03T01:01:01\"));\n- assertThat(roundKey, isDate(tzRounding.roundKey(time(\"2009-02-03T00:00:00.000Z\")), tz));\n- assertThat(tzRounding.valueForKey(roundKey), isDate(time(\"2009-02-03T00:00:00.000Z\"), tz));\n assertThat(tzRounding.nextRoundingValue(time(\"2009-02-03T00:00:00.000Z\")), isDate(time(\"2009-02-03T12:00:00.000Z\"), tz));\n assertThat(tzRounding.round(time(\"2009-02-03T13:01:01\")), isDate(time(\"2009-02-03T12:00:00.000Z\"), tz));\n assertThat(tzRounding.nextRoundingValue(time(\"2009-02-03T12:00:00.000Z\")), isDate(time(\"2009-02-04T00:00:00.000Z\"), tz));\n@@ -86,9 +83,6 @@ public void testTimeIntervalTimeZoneRounding() {\n DateTimeZone tz = DateTimeZone.forOffsetHours(-1);\n Rounding tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(6)).timeZone(tz).build();\n assertThat(tzRounding.round(time(\"2009-02-03T00:01:01\")), isDate(time(\"2009-02-02T19:00:00.000Z\"), tz));\n- long roundKey = tzRounding.roundKey(time(\"2009-02-03T00:01:01\"));\n- assertThat(roundKey, equalTo(tzRounding.roundKey(time(\"2009-02-02T19:00:00.000Z\"))));\n- assertThat(tzRounding.valueForKey(roundKey), isDate(time(\"2009-02-02T19:00:00.000Z\"), tz));\n assertThat(tzRounding.nextRoundingValue(time(\"2009-02-02T19:00:00.000Z\")), isDate(time(\"2009-02-03T01:00:00.000Z\"), tz));\n \n assertThat(tzRounding.round(time(\"2009-02-03T13:01:01\")), isDate(time(\"2009-02-03T13:00:00.000Z\"), tz));\n@@ -102,9 +96,6 @@ public void testDayIntervalTimeZoneRounding() {\n DateTimeZone tz = DateTimeZone.forOffsetHours(-8);\n Rounding tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(12)).timeZone(tz).build();\n assertThat(tzRounding.round(time(\"2009-02-03T00:01:01\")), isDate(time(\"2009-02-02T20:00:00.000Z\"), tz));\n- long roundKey = tzRounding.roundKey(time(\"2009-02-03T00:01:01\"));\n- assertThat(roundKey, isDate(tzRounding.roundKey(time(\"2009-02-02T20:00:00.000Z\")), tz));\n- assertThat(tzRounding.valueForKey(roundKey), isDate(time(\"2009-02-02T20:00:00.000Z\"), tz));\n assertThat(tzRounding.nextRoundingValue(time(\"2009-02-02T20:00:00.000Z\")), isDate(time(\"2009-02-03T08:00:00.000Z\"), tz));\n \n assertThat(tzRounding.round(time(\"2009-02-03T13:01:01\")), 
isDate(time(\"2009-02-03T08:00:00.000Z\"), tz));\n@@ -130,17 +121,11 @@ public void testDayTimeZoneRounding() {\n tz = DateTimeZone.forID(\"-02:00\");\n tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build();\n assertThat(tzRounding.round(time(\"2009-02-03T01:01:01\")), isDate(time(\"2009-02-02T02:00:00\"), tz));\n- long roundKey = tzRounding.roundKey(time(\"2009-02-03T01:01:01\"));\n- assertThat(roundKey, isDate(tzRounding.roundKey(time(\"2009-02-02T02:00:00.000Z\")), tz));\n- assertThat(tzRounding.valueForKey(roundKey), isDate(time(\"2009-02-02T02:00:00.000Z\"), tz));\n assertThat(tzRounding.nextRoundingValue(time(\"2009-02-02T02:00:00\")), isDate(time(\"2009-02-03T02:00:00\"), tz));\n \n // date in Feb-3rd, also in -02:00 timezone\n tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build();\n assertThat(tzRounding.round(time(\"2009-02-03T02:01:01\")), isDate(time(\"2009-02-03T02:00:00\"), tz));\n- roundKey = tzRounding.roundKey(time(\"2009-02-03T02:01:01\"));\n- assertThat(roundKey, isDate(tzRounding.roundKey(time(\"2009-02-03T02:00:00.000Z\")), tz));\n- assertThat(tzRounding.valueForKey(roundKey), isDate(time(\"2009-02-03T02:00:00.000Z\"), tz));\n assertThat(tzRounding.nextRoundingValue(time(\"2009-02-03T02:00:00\")), isDate(time(\"2009-02-04T02:00:00\"), tz));\n }\n ", "filename": "core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java", "status": "modified" }, { "diff": "@@ -124,7 +124,7 @@ public void testUnmappedHistogram() {\n assertSearchResponse(response);\n Histogram histogram = response.getAggregations().get(\"my_histogram\");\n assertEquals(1, histogram.getBuckets().size());\n- assertEquals(10L, histogram.getBuckets().get(0).getKey());\n+ assertEquals(10d, histogram.getBuckets().get(0).getKey());\n assertEquals(2, histogram.getBuckets().get(0).getDocCount());\n }\n \n@@ -133,16 +133,16 @@ public void testHistogram() {\n assertSearchResponse(response);\n Histogram histogram = response.getAggregations().get(\"my_histogram\");\n assertEquals(2, histogram.getBuckets().size());\n- assertEquals(0L, histogram.getBuckets().get(0).getKey());\n+ assertEquals(0d, histogram.getBuckets().get(0).getKey());\n assertEquals(1, histogram.getBuckets().get(0).getDocCount());\n- assertEquals(5L, histogram.getBuckets().get(1).getKey());\n+ assertEquals(5d, histogram.getBuckets().get(1).getKey());\n assertEquals(1, histogram.getBuckets().get(1).getDocCount());\n \n response = client().prepareSearch(\"idx\").addAggregation(histogram(\"my_histogram\").field(\"long\").interval(5).missing(3)).get();\n assertSearchResponse(response);\n histogram = response.getAggregations().get(\"my_histogram\");\n assertEquals(1, histogram.getBuckets().size());\n- assertEquals(0L, histogram.getBuckets().get(0).getKey());\n+ assertEquals(0d, histogram.getBuckets().get(0).getKey());\n assertEquals(2, histogram.getBuckets().get(0).getDocCount());\n }\n ", "filename": "core/src/test/java/org/elasticsearch/search/aggregations/MissingValueIT.java", "status": "modified" } ] }
{ "body": "Hello,\n\nI am not able to combine normal (e.g., `term filter`) with `nested filter` in `filter aggregation` which is itself nested in `nested aggregation`.\n\nMapping: `tasks` with nested `events`, `events` with nested `parameters`.\n\n<pre>\ncurl -XPUT 'localhost:9200/nestingtest/' -d '\n{\n \"mappings\": {\n \"tasks\": {\n properties: {\n task_id: {\n type: \"long\",\n doc_values: true\n },\n events: {\n type: \"nested\",\n properties: {\n id: {\n type: \"long\",\n doc_values: true\n },\n parameters: {\n type: \"nested\",\n properties: {\n name: {\n type: \"string\",\n index: \"not_analyzed\",\n doc_values: true\n },\n value: {\n type: \"string\",\n index: \"not_analyzed\",\n doc_values: true\n }\n }\n }\n }\n }\n }\n }\n }\n}\n'\n</pre>\n\nData:\n\n<pre>\ncurl -XPOST 'localhost:9200/nestingtest/tasks/1' -d '\n{\n task_id: 1,\n events: [\n {\n id: 1,\n parameters: {\n name: \"attribution\",\n value: \"campaignX\"\n }\n }\n ]\n}\n'\n\ncurl -XPOST 'localhost:9200/nestingtest/tasks/2' -d '\n{\n task_id: 2,\n events: [\n {\n id: 21,\n parameters: [\n {\n name: \"attribution\",\n value: \"campaignY\"\n }\n ]\n },\n {\n id: 22,\n parameters: {\n name: \"attribution\",\n value: \"campaignY\"\n }\n }\n ]\n}\n'\n</pre>\n\n\nQuery that does not work as expected:\n\n<pre>\ncurl -XPOST 'localhost:9200/nestingtest/tasks/_search' -d '\n{\n size: 0,\n aggs: {\n \"to-events\": {\n nested: {\n path: \"events\"\n },\n aggs: {\n filtered: {\n filter: {\n nested: {\n path: \"events.parameters\",\n filter: {\n term: {\n \"events.parameters.value\": \"campaignX\"\n }\n }\n }\n }\n }\n }\n }\n }\n}\n'\n</pre>\n\n\nResponse:\n\n<pre>\n\"aggregations\": {\n \"to-events\": {\n \"doc_count\": 3,\n \"filtered\": {\n \"doc_count\": 0 //this is wrong, should be 1\n }\n }\n}\n</pre>\n\n\nIf I change the query to use the `nested aggregation`, the results are as expected:\n\n<pre>\ncurl -XPOST 'localhost:9200' -d '\n{\n size: 0,\n aggs: {\n \"to-events\": {\n nested: {\n path: \"events\"\n },\n aggs: {\n \"to-parameters\": {\n nested: {\n path: \"events.parameters\"\n },\n aggs: {\n filtered: {\n filter: {\n term: {\n \"events.parameters.value\": \"campaignX\"\n }\n }\n }\n }\n }\n }\n }\n }\n}'\n</pre>\n\nResponse:\n\n<pre>\n\"aggregations\": {\n \"to-events\": {\n \"doc_count\": 3,\n \"to-parameters\": {\n \"doc_count\": 3,\n \"filtered\": {\n \"doc_count\": 1\n }\n }\n }\n }\n</pre>\n\n\nUsing `nested aggregation` is not an ideal solution for me, as I would like to combine that `nested filter` with other filters acting at the level of events, get one number of matched documents and then dive into sub-aggs for those documents.\n# \n\n<pre>\n\"version\" : {\n \"number\" : \"1.6.0\",\n \"build_hash\" : \"cdd3ac4dde4f69524ec0a14de3828cb95bbb86d0\",\n \"build_timestamp\" : \"2015-06-09T13:36:34Z\",\n \"build_snapshot\" : false,\n \"lucene_version\" : \"4.10.4\"\n }\n</pre>\n\n", "comments": [ { "body": "@martijnvg please take a look\n", "created_at": "2015-06-18T19:33:04Z" }, { "body": "@crutch The issue is that the `nested` aggregator doesn't tell what `path` has been set to the `nested` filter. This causes the `nested` to translate the matching docs incorrectly to the `nested` aggregator and this results in no results. So this is indeed a bug and should get fixed.\n", "created_at": "2015-06-21T20:23:05Z" }, { "body": "@crutch About the `nested` aggregator solution, I think that should work for you too? 
You can just place another filter aggregator under the first `nested` aggregator:\n\n``` json\n{\n \"size\": 0,\n \"aggs\": {\n \"to-events\": {\n \"nested\": {\n \"path\": \"events\"\n },\n \"aggs\": {\n \"id_filter\": {\n \"filter\": {\n \"term\": {\n \"events.id\": 22\n }\n },\n \"aggs\": {\n \"to-parameters\": {\n \"nested\": {\n \"path\": \"events.parameters\"\n },\n \"aggs\": {\n \"filtered\": {\n \"filter\": {\n \"term\": {\n \"events.parameters.value\": \"campaignY\"\n }\n }\n }\n }\n }\n }\n }\n }\n }\n }\n}\n```\n", "created_at": "2015-06-22T08:00:34Z" }, { "body": "@martijnvg thank you for investigating this.\n\nUsing a `filter` aggregator under the `nested` one is my current solution. I am able to query for correct numbers, but struggle to get e.g., correct `terms` aggs. \n\nImagine that my filter is a compound one, matching some fields of `event` and some fields of nested `parameters`. And I would like to have a `terms` aggregation based on a field of `event` for all documents matching this compound filter. I can split the filter into two parts: event-level & nested-level, which is used under `nested` aggregation...but where to put the `terms` aggregation then?\n\nIf my `terms` aggregation is used outside of the `nested` aggregation, its results are not filtered by my nested filter. And I cannot put this `terms` aggregation inside the `nested` one, as it does not have access to its parent's fields.\n", "created_at": "2015-06-22T12:42:31Z" }, { "body": "@martijnvg I hit this issue once again. I need a `not` filter on top of a `nested filter`, in an aggregation. Do you have any idea how to achieve this using `not filter` and `nested aggregation` instead of `nested filter`?\n\nUsing the `not filter` inside the `nested aggregation` will match on sibling nested documents...\n", "created_at": "2015-07-30T10:29:32Z" }, { "body": "Any news about this? I have similar problems and a solution to this would help me a lot.\n", "created_at": "2015-08-13T12:51:54Z" }, { "body": "We also faced this issue during the migration from facets to aggregation framework. In our case we have a `bool` filter with `must_not` condition which contains a nested query. As described here and in https://github.com/elastic/elasticsearch/issues/12410, it is possible to represent simple conjunction filters with nested aggregations, but this is pretty much it. I don't see any way how one can represent a disjunctions or negations in this way (would appreciate if somebody could share how to do it, if it's possible). \n\nIt makes it very hard for us to do the migration to an aggregations framework. Our main use-case for facets/aggregations is a faceted navigation (which means that we are using aggregation filter buckets in conjunction with the `post_filter`). I would really appreciate if somebody could revisit this issue or at least share an information whether this issue is planned to be solved (and if yet then. of course, it would be very helpful to know when since we need to plan our migration as well).\n", "created_at": "2015-11-25T10:52:05Z" }, { "body": ":+1: \n", "created_at": "2015-11-26T15:22:35Z" }, { "body": "This is really a blocker for me, and I think there is no possible workaround. \nI have 2 levels of nesting, product > [offer] > [invprice]. I want to calculate price range facet for the products, only considering offers which have popularity of 5 and have inventory for all provided dates. 
This is the query I am trying to run:\n\n```\n{\n \"query\": {\n \"match\": {\n \"productcode\": \"p1\"\n }\n },\n \"aggs\": {\n \"product_offers\": {\n \"nested\": {\n \"path\": \"offers\"\n },\n \"aggs\": {\n \"offers\": {\n \"filter\": {\n \"bool\": {\n \"must\": [\n {\n \"term\": {\n \"popularity\": 5\n }\n },\n {\n \"nested\": {\n \"path\": \"offers.invprice\",\n \"query\": {\n \"terms\": {\n \"offers.invprice.date\": [1444501800000]\n }\n }\n }\n },\n {\n \"nested\": {\n \"path\": \"offers.invprice\",\n \"query\": {\n \"terms\": {\n \"offers.invprice.date\": [1447093800000]\n }\n }\n }\n }\n ]\n }\n },\n \"aggs\": {\n \"price_ranges\": {\n \"nested\": {\n \"path\": \"offers.invprice\"\n }, \n \"aggs\": {\n \"ranges\": {\n \"range\": {\n \"field\": \"offers.invprice.price\",\n \"ranges\": [\n {\n \"from\": 50,\n \"to\": 300\n },\n {\n \"from\": 300,\n \"to\": 700\n },\n {\n \"from\": 700,\n \"to\": 1000\n }\n ]\n }\n }\n }\n }\n } \n }\n }\n }\n }\n}\n```\n", "created_at": "2015-11-30T19:11:03Z" }, { "body": "Since it is label as a bug, is there any ETA on a fix?\n\nThis is kind of an important feature for me\n", "created_at": "2015-12-08T21:53:52Z" }, { "body": "Hi there,\nare you consider to work on this in the next, say, 2-3 months? Otherwise we have to consider implemeting another solution to this problem.\n\nthanks\n", "created_at": "2015-12-11T13:24:20Z" }, { "body": "@ddombrowskii @sebbulon I suggest working on another solution in the meantime.\n", "created_at": "2015-12-14T17:00:28Z" }, { "body": "@clintongormley Can we expecting a fix in Elastic Stack 5.0 ?\n", "created_at": "2016-03-28T18:09:02Z" }, { "body": "It's very unfortunate that this fix is available in 5.x version only as migrating from 2.x to 5.x is not something I can do at the moment...", "created_at": "2017-03-25T20:38:04Z" }, { "body": "You can do aggs like this:\r\n\"myaggs\": {\r\n \"filter\": {\"bool\": {\"must_not\": {\"term\": {\"method.keyword\": \"POST\"}}}},\r\n \"aggs\": example_aggs\r\n}", "created_at": "2017-12-28T09:21:36Z" }, { "body": "Is that means ?", "created_at": "2017-12-28T09:23:41Z" }, { "body": "> @crutch About the `nested` aggregator solution, I think that should work for you too? You can just place another filter aggregator under the first `nested` aggregator:\r\n> \r\n> ```json\r\n> {\r\n> \"size\": 0,\r\n> \"aggs\": {\r\n> \"to-events\": {\r\n> \"nested\": {\r\n> \"path\": \"events\"\r\n> },\r\n> \"aggs\": {\r\n> \"id_filter\": {\r\n> \"filter\": {\r\n> \"term\": {\r\n> \"events.id\": 22\r\n> }\r\n> },\r\n> \"aggs\": {\r\n> \"to-parameters\": {\r\n> \"nested\": {\r\n> \"path\": \"events.parameters\"\r\n> },\r\n> \"aggs\": {\r\n> \"filtered\": {\r\n> \"filter\": {\r\n> \"term\": {\r\n> \"events.parameters.value\": \"campaignY\"\r\n> }\r\n> }\r\n> }\r\n> }\r\n> }\r\n> }\r\n> }\r\n> }\r\n> }\r\n> }\r\n> }\r\n> ```\r\n\r\nBased on your answer, managed to solve my issues, thank you! :)\r\nI had multiple nested documents on one single document.", "created_at": "2019-11-15T11:02:15Z" } ], "number": 11749, "title": "Aggregations: nested filter aggregation with nested filter returning no results" }
{ "body": "Before the aggregation tree was traversed to figure out what the parent level is, this commit\nchanges that by using `NestedScope` to figure out the nested depth level. \n\nThe big upsides are that this cleans up `NestedAggregator` (it used a hack to lazily figure out the nested parent filter) and this is also what `nested` query uses and therefor the `nested` query can be included inside `nested` aggregation and work correctly.\n\nPR for #11749 and #12410\n", "number": 19550, "review_comments": [ { "body": "It looks like we can remove these instance variables and only declare them in `getLeafCollector`?\n", "created_at": "2016-07-25T09:59:43Z" }, { "body": "will this also happen if the parent nested aggregation is unmapped?\n", "created_at": "2016-07-25T10:03:33Z" }, { "body": "yes we can: 2d07971ea1ddf5c79a2105a5cd8bed937cd06c11\n", "created_at": "2016-07-25T11:29:31Z" }, { "body": "good catch! This would also fail if the parent `nested` agg was unmapped. I fixed this and adjusted a test too: 222aeb0d97d1946592d7281f93c65678ac5bcada\n", "created_at": "2016-07-25T11:30:14Z" } ], "title": "Change how `nested` and `reverse_nested` aggs know about their nested depth level" }
{ "commits": [ { "message": "aggs: Changed how `nested` and `reverse_nested` aggs know about their nested depth level.\n\nBefore the aggregation tree was traversed to figure out what the parent level is, this commit\nchanges that by using `NestedScope` to figure out the nested depth level. The big upsides\nare that this cleans up `NestedAggregator` (it used a hack to lazily figure out the nested parent filter)\n and this is also what `nested` query uses and therefor the `nested` query can be included inside `nested`\n aggregation and work correctly.\n\nCloses #11749\nCloses #12410" } ], "files": [ { "diff": "@@ -229,6 +229,10 @@ public String getType() {\n return type.name();\n }\n \n+ public AggregatorFactory<?> getParent() {\n+ return parent;\n+ }\n+\n /**\n * Utility method. Given an {@link AggregatorFactory} that creates\n * {@link Aggregator}s that only know how to collect bucket <tt>0</tt>, this\n@@ -241,4 +245,4 @@ protected static Aggregator asMultiBucketAggregator(final AggregatorFactory<?> f\n return new MultiBucketAggregatorWrapper(bigArrays, context, parent, factory, first);\n }\n \n-}\n\\ No newline at end of file\n+}", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java", "status": "modified" }, { "diff": "@@ -25,8 +25,10 @@\n import org.elasticsearch.common.io.stream.StreamOutput;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentParser;\n+import org.elasticsearch.index.mapper.object.ObjectMapper;\n import org.elasticsearch.index.query.QueryParseContext;\n import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;\n+import org.elasticsearch.search.aggregations.AggregationExecutionException;\n import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;\n import org.elasticsearch.search.aggregations.InternalAggregation.Type;\n import org.elasticsearch.search.aggregations.AggregatorFactory;\n@@ -80,7 +82,22 @@ public String path() {\n @Override\n protected AggregatorFactory<?> doBuild(AggregationContext context, AggregatorFactory<?> parent, Builder subFactoriesBuilder)\n throws IOException {\n- return new NestedAggregatorFactory(name, type, path, context, parent, subFactoriesBuilder, metaData);\n+ ObjectMapper childObjectMapper = context.searchContext().getObjectMapper(path);\n+ if (childObjectMapper == null) {\n+ // in case the path has been unmapped:\n+ return new NestedAggregatorFactory(name, type, null, null, context, parent, subFactoriesBuilder, metaData);\n+ }\n+\n+ if (childObjectMapper.nested().isNested() == false) {\n+ throw new AggregationExecutionException(\"[nested] nested path [\" + path + \"] is not nested\");\n+ }\n+ try {\n+ ObjectMapper parentObjectMapper = context.searchContext().getQueryShardContext().nestedScope().nextLevel(childObjectMapper);\n+ return new NestedAggregatorFactory(name, type, parentObjectMapper, childObjectMapper, context, parent, subFactoriesBuilder,\n+ metaData);\n+ } finally {\n+ context.searchContext().getQueryShardContext().nestedScope().previousLevel();\n+ }\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -44,69 +44,40 @@\n import java.util.List;\n import java.util.Map;\n \n-/**\n- *\n- */\n public class NestedAggregator extends SingleBucketAggregator {\n \n static final ParseField PATH_FIELD = new ParseField(\"path\");\n \n- private BitSetProducer parentFilter;\n+ private final 
BitSetProducer parentFilter;\n private final Query childFilter;\n \n- private DocIdSetIterator childDocs;\n- private BitSet parentDocs;\n-\n- public NestedAggregator(String name, AggregatorFactories factories, ObjectMapper objectMapper, AggregationContext aggregationContext, Aggregator parentAggregator, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {\n+ public NestedAggregator(String name, AggregatorFactories factories, ObjectMapper parentObjectMapper, ObjectMapper childObjectMapper,\n+ AggregationContext aggregationContext, Aggregator parentAggregator,\n+ List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {\n super(name, factories, aggregationContext, parentAggregator, pipelineAggregators, metaData);\n- childFilter = objectMapper.nestedTypeFilter();\n+ Query parentFilter = parentObjectMapper != null ? parentObjectMapper.nestedTypeFilter() : Queries.newNonNestedFilter();\n+ this.parentFilter = context.searchContext().bitsetFilterCache().getBitSetProducer(parentFilter);\n+ this.childFilter = childObjectMapper.nestedTypeFilter();\n }\n \n @Override\n public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {\n- // Reset parentFilter, so we resolve the parentDocs for each new segment being searched\n- this.parentFilter = null;\n- final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(ctx);\n- final IndexSearcher searcher = new IndexSearcher(topLevelContext);\n+ IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(ctx);\n+ IndexSearcher searcher = new IndexSearcher(topLevelContext);\n searcher.setQueryCache(null);\n- final Weight weight = searcher.createNormalizedWeight(childFilter, false);\n+ Weight weight = searcher.createNormalizedWeight(childFilter, false);\n Scorer childDocsScorer = weight.scorer(ctx);\n- if (childDocsScorer == null) {\n- childDocs = null;\n- } else {\n- childDocs = childDocsScorer.iterator();\n- }\n \n+ final BitSet parentDocs = parentFilter.getBitSet(ctx);\n+ final DocIdSetIterator childDocs = childDocsScorer != null ? childDocsScorer.iterator() : null;\n return new LeafBucketCollectorBase(sub, null) {\n @Override\n public void collect(int parentDoc, long bucket) throws IOException {\n- // here we translate the parent doc to a list of its nested docs, and then call super.collect for evey one of them so they'll be collected\n-\n- // if parentDoc is 0 then this means that this parent doesn't have child docs (b/c these appear always before the parent doc), so we can skip:\n- if (parentDoc == 0 || childDocs == null) {\n+ // if parentDoc is 0 then this means that this parent doesn't have child docs (b/c these appear always before the parent\n+ // doc), so we can skip:\n+ if (parentDoc == 0 || parentDocs == null || childDocs == null) {\n return;\n }\n- if (parentFilter == null) {\n- // The aggs are instantiated in reverse, first the most inner nested aggs and lastly the top level aggs\n- // So at the time a nested 'nested' aggs is parsed its closest parent nested aggs hasn't been constructed.\n- // So the trick is to set at the last moment just before needed and we can use its child filter as the\n- // parent filter.\n-\n- // Additional NOTE: Before this logic was performed in the setNextReader(...) 
method, but the assumption\n- // that aggs instances are constructed in reverse doesn't hold when buckets are constructed lazily during\n- // aggs execution\n- Query parentFilterNotCached = findClosestNestedPath(parent());\n- if (parentFilterNotCached == null) {\n- parentFilterNotCached = Queries.newNonNestedFilter();\n- }\n- parentFilter = context.searchContext().bitsetFilterCache().getBitSetProducer(parentFilterNotCached);\n- parentDocs = parentFilter.getBitSet(ctx);\n- if (parentDocs == null) {\n- // There are no parentDocs in the segment, so return and set childDocs to null, so we exit early for future invocations.\n- childDocs = null;\n- return;\n- }\n- }\n \n final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);\n int childDocId = childDocs.docID();\n@@ -123,24 +94,13 @@ public void collect(int parentDoc, long bucket) throws IOException {\n \n @Override\n public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {\n- return new InternalNested(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), pipelineAggregators(),\n- metaData());\n+ return new InternalNested(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal),\n+ pipelineAggregators(), metaData());\n }\n \n @Override\n public InternalAggregation buildEmptyAggregation() {\n return new InternalNested(name, 0, buildEmptySubAggregations(), pipelineAggregators(), metaData());\n }\n \n- private static Query findClosestNestedPath(Aggregator parent) {\n- for (; parent != null; parent = parent.parent()) {\n- if (parent instanceof NestedAggregator) {\n- return ((NestedAggregator) parent).childFilter;\n- } else if (parent instanceof ReverseNestedAggregator) {\n- return ((ReverseNestedAggregator) parent).getParentFilter();\n- }\n- }\n- return null;\n- }\n-\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java", "status": "modified" }, { "diff": "@@ -36,12 +36,15 @@\n \n public class NestedAggregatorFactory extends AggregatorFactory<NestedAggregatorFactory> {\n \n- private final String path;\n+ private final ObjectMapper parentObjectMapper;\n+ private final ObjectMapper childObjectMapper;\n \n- public NestedAggregatorFactory(String name, Type type, String path, AggregationContext context, AggregatorFactory<?> parent,\n- AggregatorFactories.Builder subFactories, Map<String, Object> metaData) throws IOException {\n+ public NestedAggregatorFactory(String name, Type type, ObjectMapper parentObjectMapper, ObjectMapper childObjectMapper,\n+ AggregationContext context, AggregatorFactory<?> parent, AggregatorFactories.Builder subFactories,\n+ Map<String, Object> metaData) throws IOException {\n super(name, type, context, parent, subFactories, metaData);\n- this.path = path;\n+ this.parentObjectMapper = parentObjectMapper;\n+ this.childObjectMapper = childObjectMapper;\n }\n \n @Override\n@@ -50,14 +53,10 @@ public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBu\n if (collectsFromSingleBucket == false) {\n return asMultiBucketAggregator(this, context, parent);\n }\n- ObjectMapper objectMapper = context.searchContext().getObjectMapper(path);\n- if (objectMapper == null) {\n+ if (childObjectMapper == null) {\n return new Unmapped(name, context, parent, pipelineAggregators, metaData);\n }\n- if (!objectMapper.nested().isNested()) {\n- throw new AggregationExecutionException(\"[nested] nested path [\" + path + \"] is not nested\");\n- }\n- return new NestedAggregator(name, 
factories, objectMapper, context, parent, pipelineAggregators, metaData);\n+ return new NestedAggregator(name, factories, parentObjectMapper, childObjectMapper, context, parent, pipelineAggregators, metaData);\n }\n \n private static final class Unmapped extends NonCollectingAggregator {", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java", "status": "modified" }, { "diff": "@@ -25,14 +25,20 @@\n import org.elasticsearch.common.io.stream.StreamOutput;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentParser;\n+import org.elasticsearch.index.mapper.object.ObjectMapper;\n import org.elasticsearch.index.query.QueryParseContext;\n+import org.elasticsearch.index.query.support.NestedScope;\n+import org.elasticsearch.search.SearchParseException;\n import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;\n+import org.elasticsearch.search.aggregations.AggregationExecutionException;\n import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;\n import org.elasticsearch.search.aggregations.InternalAggregation.Type;\n import org.elasticsearch.search.aggregations.AggregatorFactory;\n import org.elasticsearch.search.aggregations.support.AggregationContext;\n \n import java.io.IOException;\n+import java.util.Deque;\n+import java.util.LinkedList;\n import java.util.Objects;\n \n public class ReverseNestedAggregationBuilder extends AbstractAggregationBuilder<ReverseNestedAggregationBuilder> {\n@@ -82,7 +88,40 @@ public String path() {\n @Override\n protected AggregatorFactory<?> doBuild(AggregationContext context, AggregatorFactory<?> parent, Builder subFactoriesBuilder)\n throws IOException {\n- return new ReverseNestedAggregatorFactory(name, type, path, context, parent, subFactoriesBuilder, metaData);\n+ if (findNestedAggregatorFactory(parent) == null) {\n+ throw new SearchParseException(context.searchContext(),\n+ \"Reverse nested aggregation [\" + name + \"] can only be used inside a [nested] aggregation\", null);\n+ }\n+\n+ ObjectMapper parentObjectMapper = null;\n+ if (path != null) {\n+ parentObjectMapper = context.searchContext().getObjectMapper(path);\n+ if (parentObjectMapper == null) {\n+ return new ReverseNestedAggregatorFactory(name, type, true, null, context, parent, subFactoriesBuilder, metaData);\n+ }\n+ if (parentObjectMapper.nested().isNested() == false) {\n+ throw new AggregationExecutionException(\"[reverse_nested] nested path [\" + path + \"] is not nested\");\n+ }\n+ }\n+\n+ NestedScope nestedScope = context.searchContext().getQueryShardContext().nestedScope();\n+ try {\n+ nestedScope.nextLevel(parentObjectMapper);\n+ return new ReverseNestedAggregatorFactory(name, type, false, parentObjectMapper, context, parent, subFactoriesBuilder,\n+ metaData);\n+ } finally {\n+ nestedScope.previousLevel();\n+ }\n+ }\n+\n+ private static NestedAggregatorFactory findNestedAggregatorFactory(AggregatorFactory<?> parent) {\n+ if (parent == null) {\n+ return null;\n+ } else if (parent instanceof NestedAggregatorFactory) {\n+ return (NestedAggregatorFactory) parent;\n+ } else {\n+ return findNestedAggregatorFactory(parent.getParent());\n+ }\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java", "status": "modified" }, { "diff": "@@ -37,46 +37,26 @@\n \n public class ReverseNestedAggregatorFactory extends AggregatorFactory<ReverseNestedAggregatorFactory> {\n \n- 
private final String path;\n+ private final boolean unmapped;\n+ private final ObjectMapper parentObjectMapper;\n \n- public ReverseNestedAggregatorFactory(String name, Type type, String path, AggregationContext context, AggregatorFactory<?> parent,\n- AggregatorFactories.Builder subFactories, Map<String, Object> metaData) throws IOException {\n+ public ReverseNestedAggregatorFactory(String name, Type type, boolean unmapped, ObjectMapper parentObjectMapper,\n+ AggregationContext context, AggregatorFactory<?> parent,\n+ AggregatorFactories.Builder subFactories,\n+ Map<String, Object> metaData) throws IOException {\n super(name, type, context, parent, subFactories, metaData);\n- this.path = path;\n+ this.unmapped = unmapped;\n+ this.parentObjectMapper = parentObjectMapper;\n }\n \n @Override\n public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket, List<PipelineAggregator> pipelineAggregators,\n Map<String, Object> metaData) throws IOException {\n- // Early validation\n- NestedAggregator closestNestedAggregator = findClosestNestedAggregator(parent);\n- if (closestNestedAggregator == null) {\n- throw new SearchParseException(context.searchContext(),\n- \"Reverse nested aggregation [\" + name + \"] can only be used inside a [nested] aggregation\", null);\n- }\n-\n- final ObjectMapper objectMapper;\n- if (path != null) {\n- objectMapper = context.searchContext().getObjectMapper(path);\n- if (objectMapper == null) {\n- return new Unmapped(name, context, parent, pipelineAggregators, metaData);\n- }\n- if (!objectMapper.nested().isNested()) {\n- throw new AggregationExecutionException(\"[reverse_nested] nested path [\" + path + \"] is not nested\");\n- }\n+ if (unmapped) {\n+ return new Unmapped(name, context, parent, pipelineAggregators, metaData);\n } else {\n- objectMapper = null;\n- }\n- return new ReverseNestedAggregator(name, factories, objectMapper, context, parent, pipelineAggregators, metaData);\n- }\n-\n- private static NestedAggregator findClosestNestedAggregator(Aggregator parent) {\n- for (; parent != null; parent = parent.parent()) {\n- if (parent instanceof NestedAggregator) {\n- return (NestedAggregator) parent;\n- }\n+ return new ReverseNestedAggregator(name, factories, parentObjectMapper, context, parent, pipelineAggregators, metaData);\n }\n- return null;\n }\n \n private static final class Unmapped extends NonCollectingAggregator {", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java", "status": "modified" }, { "diff": "@@ -18,6 +18,7 @@\n */\n package org.elasticsearch.search.aggregations.bucket;\n \n+import org.apache.lucene.search.join.ScoreMode;\n import org.elasticsearch.action.index.IndexRequestBuilder;\n import org.elasticsearch.action.search.SearchPhaseExecutionException;\n import org.elasticsearch.action.search.SearchResponse;\n@@ -44,6 +45,7 @@\n import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;\n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;\n+import static org.elasticsearch.index.query.QueryBuilders.nestedQuery;\n import static org.elasticsearch.index.query.QueryBuilders.termQuery;\n import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;\n import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;\n@@ -62,15 +64,12 @@\n import static org.hamcrest.Matchers.sameInstance;\n import 
static org.hamcrest.core.IsNull.notNullValue;\n \n-/**\n- *\n- */\n @ESIntegTestCase.SuiteScopeTestCase\n public class NestedIT extends ESIntegTestCase {\n \n- static int numParents;\n- static int[] numChildren;\n- static SubAggCollectionMode aggCollectionMode;\n+ private static int numParents;\n+ private static int[] numChildren;\n+ private static SubAggCollectionMode aggCollectionMode;\n \n @Override\n public void setupSuiteScopeCluster() throws Exception {\n@@ -245,7 +244,7 @@ public void testNestedWithSubTermsAgg() throws Exception {\n assertThat(nested, notNullValue());\n assertThat(nested.getName(), equalTo(\"nested\"));\n assertThat(nested.getDocCount(), equalTo(docCount));\n- assertThat((long) nested.getProperty(\"_count\"), equalTo(docCount));\n+ assertThat(nested.getProperty(\"_count\"), equalTo(docCount));\n assertThat(nested.getAggregations().asList().isEmpty(), is(false));\n \n LongTerms values = nested.getAggregations().get(\"values\");\n@@ -263,7 +262,7 @@ public void testNestedWithSubTermsAgg() throws Exception {\n assertEquals(counts[i], bucket.getDocCount());\n }\n }\n- assertThat((LongTerms) nested.getProperty(\"values\"), sameInstance(values));\n+ assertThat(nested.getProperty(\"values\"), sameInstance(values));\n }\n \n public void testNestedAsSubAggregation() throws Exception {\n@@ -544,4 +543,126 @@ public void testNestedSameDocIdProcessedMultipleTime() throws Exception {\n assertThat(propertyId.getBucketByKey(\"2\").getDocCount(), equalTo(1L));\n assertThat(propertyId.getBucketByKey(\"3\").getDocCount(), equalTo(1L));\n }\n+\n+ public void testFilterAggInsideNestedAgg() throws Exception {\n+ assertAcked(prepareCreate(\"classes\")\n+ .addMapping(\"class\", jsonBuilder().startObject().startObject(\"class\").startObject(\"properties\")\n+ .startObject(\"name\").field(\"type\", \"text\").endObject()\n+ .startObject(\"methods\")\n+ .field(\"type\", \"nested\")\n+ .startObject(\"properties\")\n+ .startObject(\"name\").field(\"type\", \"text\").endObject()\n+ .startObject(\"return_type\").field(\"type\", \"keyword\").endObject()\n+ .startObject(\"parameters\")\n+ .field(\"type\", \"nested\")\n+ .startObject(\"properties\")\n+ .startObject(\"name\").field(\"type\", \"text\").endObject()\n+ .startObject(\"type\").field(\"type\", \"keyword\").endObject()\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .endObject().endObject().endObject().endObject()));\n+\n+ client().prepareIndex(\"classes\", \"class\", \"1\").setSource(jsonBuilder().startObject()\n+ .field(\"name\", \"QueryBuilder\")\n+ .startArray(\"methods\")\n+ .startObject()\n+ .field(\"name\", \"toQuery\")\n+ .field(\"return_type\", \"Query\")\n+ .startArray(\"parameters\")\n+ .startObject()\n+ .field(\"name\", \"context\")\n+ .field(\"type\", \"QueryShardContext\")\n+ .endObject()\n+ .endArray()\n+ .endObject()\n+ .startObject()\n+ .field(\"name\", \"queryName\")\n+ .field(\"return_type\", \"QueryBuilder\")\n+ .startArray(\"parameters\")\n+ .startObject()\n+ .field(\"name\", \"queryName\")\n+ .field(\"type\", \"String\")\n+ .endObject()\n+ .endArray()\n+ .endObject()\n+ .startObject()\n+ .field(\"name\", \"boost\")\n+ .field(\"return_type\", \"QueryBuilder\")\n+ .startArray(\"parameters\")\n+ .startObject()\n+ .field(\"name\", \"boost\")\n+ .field(\"type\", \"float\")\n+ .endObject()\n+ .endArray()\n+ .endObject()\n+ .endArray()\n+ .endObject()).get();\n+ client().prepareIndex(\"classes\", \"class\", \"2\").setSource(jsonBuilder().startObject()\n+ .field(\"name\", \"Document\")\n+ .startArray(\"methods\")\n+ 
.startObject()\n+ .field(\"name\", \"add\")\n+ .field(\"return_type\", \"void\")\n+ .startArray(\"parameters\")\n+ .startObject()\n+ .field(\"name\", \"field\")\n+ .field(\"type\", \"IndexableField\")\n+ .endObject()\n+ .endArray()\n+ .endObject()\n+ .startObject()\n+ .field(\"name\", \"removeField\")\n+ .field(\"return_type\", \"void\")\n+ .startArray(\"parameters\")\n+ .startObject()\n+ .field(\"name\", \"name\")\n+ .field(\"type\", \"String\")\n+ .endObject()\n+ .endArray()\n+ .endObject()\n+ .startObject()\n+ .field(\"name\", \"removeFields\")\n+ .field(\"return_type\", \"void\")\n+ .startArray(\"parameters\")\n+ .startObject()\n+ .field(\"name\", \"name\")\n+ .field(\"type\", \"String\")\n+ .endObject()\n+ .endArray()\n+ .endObject()\n+ .endArray()\n+ .endObject()).get();\n+ refresh();\n+\n+ SearchResponse response = client().prepareSearch(\"classes\").addAggregation(nested(\"to_method\", \"methods\")\n+ .subAggregation(filter(\"num_string_params\",\n+ nestedQuery(\"methods.parameters\", termQuery(\"methods.parameters.type\", \"String\"), ScoreMode.None)))\n+ ).get();\n+ Nested toMethods = response.getAggregations().get(\"to_method\");\n+ Filter numStringParams = toMethods.getAggregations().get(\"num_string_params\");\n+ assertThat(numStringParams.getDocCount(), equalTo(3L));\n+\n+ response = client().prepareSearch(\"classes\").addAggregation(nested(\"to_method\", \"methods\")\n+ .subAggregation(terms(\"return_type\").field(\"methods.return_type\").subAggregation(\n+ filter(\"num_string_params\", nestedQuery(\"methods.parameters\", termQuery(\"methods.parameters.type\", \"String\"), ScoreMode.None))\n+ )\n+ )).get();\n+ toMethods = response.getAggregations().get(\"to_method\");\n+ Terms terms = toMethods.getAggregations().get(\"return_type\");\n+ Bucket bucket = terms.getBucketByKey(\"void\");\n+ assertThat(bucket.getDocCount(), equalTo(3L));\n+ numStringParams = bucket.getAggregations().get(\"num_string_params\");\n+ assertThat(numStringParams.getDocCount(), equalTo(2L));\n+\n+ bucket = terms.getBucketByKey(\"QueryBuilder\");\n+ assertThat(bucket.getDocCount(), equalTo(2L));\n+ numStringParams = bucket.getAggregations().get(\"num_string_params\");\n+ assertThat(numStringParams.getDocCount(), equalTo(1L));\n+\n+ bucket = terms.getBucketByKey(\"Query\");\n+ assertThat(bucket.getDocCount(), equalTo(1L));\n+ numStringParams = bucket.getAggregations().get(\"num_string_params\");\n+ assertThat(numStringParams.getDocCount(), equalTo(0L));\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java", "status": "modified" }, { "diff": "@@ -55,9 +55,6 @@\n import static org.hamcrest.Matchers.sameInstance;\n import static org.hamcrest.core.IsNull.notNullValue;\n \n-/**\n- *\n- */\n @ESIntegTestCase.SuiteScopeTestCase\n public class ReverseNestedIT extends ESIntegTestCase {\n \n@@ -170,9 +167,9 @@ public void testSimpleReverseNestedToRoot() throws Exception {\n assertThat(bucket.getKeyAsString(), equalTo(\"1\"));\n assertThat(bucket.getDocCount(), equalTo(6L));\n ReverseNested reverseNested = bucket.getAggregations().get(\"nested1_to_field1\");\n- assertThat((long) reverseNested.getProperty(\"_count\"), equalTo(5L));\n+ assertThat(reverseNested.getProperty(\"_count\"), equalTo(5L));\n Terms tags = reverseNested.getAggregations().get(\"field1\");\n- assertThat((Terms) reverseNested.getProperty(\"field1\"), sameInstance(tags));\n+ assertThat(reverseNested.getProperty(\"field1\"), sameInstance(tags));\n List<Terms.Bucket> tagsBuckets = new 
ArrayList<>(tags.getBuckets());\n assertThat(tagsBuckets.size(), equalTo(6));\n assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo(\"c\"));\n@@ -472,14 +469,25 @@ public void testNonExistingNestedField() throws Exception {\n SearchResponse searchResponse = client().prepareSearch(\"idx\")\n .setQuery(matchAllQuery())\n .addAggregation(nested(\"nested2\", \"nested1.nested2\").subAggregation(reverseNested(\"incorrect\").path(\"nested3\")))\n- .execute().actionGet();\n+ .get();\n \n Nested nested = searchResponse.getAggregations().get(\"nested2\");\n- assertThat(nested, Matchers.notNullValue());\n+ assertThat(nested, notNullValue());\n assertThat(nested.getName(), equalTo(\"nested2\"));\n \n ReverseNested reverseNested = nested.getAggregations().get(\"incorrect\");\n assertThat(reverseNested.getDocCount(), is(0L));\n+\n+ // Test that parsing the reverse_nested agg doesn't fail, because the parent nested agg is unmapped:\n+ searchResponse = client().prepareSearch(\"idx\")\n+ .setQuery(matchAllQuery())\n+ .addAggregation(nested(\"incorrect1\", \"incorrect1\").subAggregation(reverseNested(\"incorrect2\").path(\"incorrect2\")))\n+ .get();\n+\n+ nested = searchResponse.getAggregations().get(\"incorrect1\");\n+ assertThat(nested, notNullValue());\n+ assertThat(nested.getName(), equalTo(\"incorrect1\"));\n+ assertThat(nested.getDocCount(), is(0L));\n }\n \n public void testSameParentDocHavingMultipleBuckets() throws Exception {", "filename": "core/src/test/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java", "status": "modified" } ] }
{ "body": "\"aggs\" accidentally got deprecated as a field name in requests, we should un-deprecate it since it is a supported parameter in search requests.\n\n```\nHTTP/1.1 500 Internal Server Error\nWarning: Deprecated field [aggs] used, expected [aggregations] instead\nContent-Type: application/json; charset=UTF-8\nContent-Encoding: gzip\nContent-Length: 556\n```\n", "comments": [ { "body": "I think it'd be nice to wait until #19509 is merged so we can assert things about headers in the REST tests.\n", "created_at": "2016-07-20T13:25:19Z" }, { "body": "Oops, I think this was me sorry. The problem is here: https://github.com/elastic/elasticsearch/blob/dec620c0b08213033dbfcacfc4b37663432a1a5c/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java#L94\n\nThe issue is that currently `ParseField` expects there to be only one acceptable name for the field and all other names are taken as deprecated. So the question here is do we want to deprecate either `aggs` or `aggregations` or do we want to add the ability to specify alternative names in `ParseField`.\n\nIf it is the latter I would suggest keeping the constructor in ParseField as is so only the first name is taken as an acceptable name and the rest are deprecated, and then add a `addAlternateName(String)` method for the few cases where we have alternative acceptable name.\n\nAs for #19509 it would be great if post that PR we could add a check to all calls made in rest tests to ensure they do not contain a warning header and if so fail the test (since most tests should not be using deprecated options). For any tests that do need to test deprecated options we could add a `accept_deprecated:true` to the YAML tests to bypass this warning header check.\n\nwdyt?\n", "created_at": "2016-07-21T09:08:52Z" }, { "body": "Actually we should also set the REST tests to have strict parsing anyway so using deprecated functionality causes an exception and the request to be rejected (unless the `accept_deprecated:true` option is set on the YAML test)\n", "created_at": "2016-07-21T10:59:18Z" }, { "body": "I opened https://github.com/elastic/elasticsearch/pull/19533 to fix this using the `ParseField` changes I mentioned above. I decided to keep `ParseField` immutable so the alternative names are passed into a constructor rather than in an add method.\n", "created_at": "2016-07-21T12:14:00Z" }, { "body": "> the request to be rejected (unless the accept_deprecated:true option is set on the YAML test)\n\nI think it might be better to call it `expect_deprecated:true` and then have the same semantics, but also fail if it _doesn't_ get a deprecation warning returned.\n", "created_at": "2016-07-28T16:04:12Z" }, { "body": "> I think it might be better to call it expect_deprecated:true and then have the same semantics, but also fail if it doesn't get a deprecation warning returned.\n\nYes, that sounds good. I'd honestly really like to make that change.\n", "created_at": "2016-07-28T16:23:53Z" } ], "number": 19504, "title": "Undeprecate \"aggs\" in search request bodies" }
{ "body": "Previously all names except the first passed to the `ParseField` were\nconsidered deprecated. But we have a few cases where a field has more\nthan one acceptable (non-deprecated) name (e.g. `aggregations`\nand `aggs` for declaring aggregations.\n\nThis change adds a constructor to `ParseField` which allows alternative\nnames to be added as well as deprecated names. The change is fully\nbackwards compatible as The previous constructor still exists.\n\nNote that it is intentionally a little awkward to specify a field with\nalternative names since this is something that should only be used in\nrare circumstances and should not become the norm.\n\nCloses #19504\n", "number": 19533, "review_comments": [ { "body": "I don't think you need EMPTY_ARRAY here.\n", "created_at": "2016-07-21T12:16:10Z" }, { "body": "I need it to force it to select the three arg constructor rather than the two arg one which would cause `\"aggs\"` to be deprecated instead of set as an alternative. This is what I meant in the commit message about it being intentionally slightly awkward\n", "created_at": "2016-07-21T12:19:54Z" }, { "body": "That makes sense, yeah.\n", "created_at": "2016-07-21T14:36:17Z" }, { "body": "Can we add a bit of docs or code comments... somewhere... anywhere... to this file, to explain the distinction of alternative vs deprecated.\n\nIt makes total sense given what you have explained on the PR here, but just looking at the code, without the context of the PR, it can be a bit confusing. \n\nMatching logic and so on in this class is already confusing (given it has allReplacedWith and allNames, neither of which i have a clue what is doing). So it could really help to disambiguate it!\n", "created_at": "2016-07-22T12:51:25Z" }, { "body": "@rmuir sure, thanks for highlighting this. You are right, this class is quite confusing without the context of the PR. I'll add some documentation around the alternativeNames and deprecated names stuff and I'll also try to document the allReplacedWith bits too\n", "created_at": "2016-07-22T13:10:32Z" } ], "title": "Accept alternative non-deprecated names in ParseField" }
{ "commits": [ { "message": "Accept alternative non-deprecated names in ParseField\n\nPreviously all names except the first passed to the `ParseField` were\nconsidered deprecated. But we have a few cases where a field has more\nthan one acceptable (non-deprecated) name (e.g. `aggregations`\nand `aggs` for declaring aggregations.\n\nThis change adds a constructor to `ParseField` which allows alternative\nnames to be added as well as deprecated names. The change is fully\nbackwards compatible as The previous constructor still exists.\n\nNote that it is intentionally a little awkward to specify a field with\nalternative names since this is something that should only be used in\nrare circumstances and should not become the norm.\n\nCloses #19504" }, { "message": "Added JavaDocs and comments to ParseField" } ], "files": [ { "diff": "@@ -26,19 +26,52 @@\n import java.util.Set;\n \n /**\n- * Holds a field that can be found in a request while parsing and its different variants, which may be deprecated.\n+ * Holds a field that can be found in a request while parsing and its different\n+ * variants, which may be deprecated.\n */\n public class ParseField {\n \n private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(ParseField.class));\n \n private final String name;\n+ private final String[] alternativeNames;\n private final String[] deprecatedNames;\n private String allReplacedWith = null;\n private final String[] allNames;\n \n+ /**\n+ * @param name\n+ * the primary name for this field. This will be returned by\n+ * {@link #getPreferredName()}\n+ * @param deprecatedNames\n+ * names for this field which are deprecated and will not be\n+ * accepted when strict matching is used.\n+ */\n public ParseField(String name, String... deprecatedNames) {\n+ this(name, null, deprecatedNames);\n+ }\n+\n+ /**\n+ * @param name\n+ * the primary name for this field. This will be returned by\n+ * {@link #getPreferredName()}\n+ * @param alternativeNames\n+ * other names for this field which are not acceptable and not\n+ * deprecated. This names will be accepted regardless of whether\n+ * strict mode is used\n+ * @param deprecatedNames\n+ * other names for this field which are deprecated and will not\n+ * be accepted when strict matching is used.\n+ */\n+ public ParseField(String name, String[] alternativeNames, String... deprecatedNames) {\n this.name = name;\n+ if (alternativeNames == null || alternativeNames.length == 0) {\n+ this.alternativeNames = Strings.EMPTY_ARRAY;\n+ } else {\n+ final HashSet<String> set = new HashSet<>();\n+ Collections.addAll(set, alternativeNames);\n+ this.alternativeNames = set.toArray(new String[set.size()]);\n+ }\n if (deprecatedNames == null || deprecatedNames.length == 0) {\n this.deprecatedNames = Strings.EMPTY_ARRAY;\n } else {\n@@ -48,41 +81,91 @@ public ParseField(String name, String... 
deprecatedNames) {\n }\n Set<String> allNames = new HashSet<>();\n allNames.add(name);\n+ Collections.addAll(allNames, this.alternativeNames);\n Collections.addAll(allNames, this.deprecatedNames);\n this.allNames = allNames.toArray(new String[allNames.size()]);\n }\n \n- public String getPreferredName(){\n+ /**\n+ * @return the preferred name used for this field\n+ */\n+ public String getPreferredName() {\n return name;\n }\n \n+ /**\n+ * @return and array of the alternative names which are acceptable for this\n+ * field and are not deprecated\n+ */\n+ public String[] getAlternativeNames() {\n+ return alternativeNames;\n+ }\n+\n+ /**\n+ * @return All names for this field regardless of whether they are\n+ * deprecated\n+ */\n public String[] getAllNamesIncludedDeprecated() {\n return allNames;\n }\n \n+ /**\n+ * @param deprecatedNames\n+ * deprecated names to include with the returned\n+ * {@link ParseField}\n+ * @return a new {@link ParseField} using the preferred name and alternative\n+ * names from this one but with the specified deprecated names\n+ */\n public ParseField withDeprecation(String... deprecatedNames) {\n- return new ParseField(this.name, deprecatedNames);\n+ return new ParseField(this.name, alternativeNames, deprecatedNames);\n }\n \n /**\n- * Return a new ParseField where all field names are deprecated and replaced with {@code allReplacedWith}.\n+ * Return a new ParseField where all field names are deprecated and replaced\n+ * with {@code allReplacedWith}.\n */\n public ParseField withAllDeprecated(String allReplacedWith) {\n ParseField parseField = this.withDeprecation(getAllNamesIncludedDeprecated());\n parseField.allReplacedWith = allReplacedWith;\n return parseField;\n }\n \n- boolean match(String currentFieldName, boolean strict) {\n- if (allReplacedWith == null && currentFieldName.equals(name)) {\n- return true;\n+ /**\n+ * @param fieldName\n+ * the field name to match against this {@link ParseField}\n+ * @param strict\n+ * if true an exception will be thrown if a deprecated field name\n+ * is given. If false the deprecated name will be matched but a\n+ * message will also be logged to the {@link DeprecationLogger}\n+ * @return true if <code>fieldName</code> matches any of the acceptable\n+ * names for this {@link ParseField}.\n+ */\n+ boolean match(String fieldName, boolean strict) {\n+ // if this parse field has not been completely deprecated then try to\n+ // match either the preferred name or one of the alternative names\n+ if (allReplacedWith == null) {\n+ if (fieldName.equals(name)) {\n+ return true;\n+ } else {\n+ for (String altName : alternativeNames) {\n+ if (fieldName.equals(altName)) {\n+ return true;\n+ }\n+ }\n+ }\n }\n+ // Now try to match against one of the deprecated names. 
Note that if\n+ // the parse field is entirely deprecated (allReplacedWith != null) all\n+ // fields will be in the deprecatedNames array\n String msg;\n for (String depName : deprecatedNames) {\n- if (currentFieldName.equals(depName)) {\n- msg = \"Deprecated field [\" + currentFieldName + \"] used, expected [\" + name + \"] instead\";\n+ if (fieldName.equals(depName)) {\n+ msg = \"Deprecated field [\" + fieldName + \"] used, expected [\" + name + \"] instead\";\n if (allReplacedWith != null) {\n- msg = \"Deprecated field [\" + currentFieldName + \"] used, replaced by [\" + allReplacedWith + \"]\";\n+ // If the field is entirely deprecated then there is no\n+ // preferred name so instead use the `allReplaceWith`\n+ // message to indicate what should be used instead\n+ msg = \"Deprecated field [\" + fieldName + \"] used, replaced by [\" + allReplacedWith + \"]\";\n }\n if (strict) {\n throw new IllegalArgumentException(msg);\n@@ -100,10 +183,20 @@ public String toString() {\n return getPreferredName();\n }\n \n+ /**\n+ * @return the message to use if this {@link ParseField} has been entirely\n+ * deprecated in favor of something else. This method will return\n+ * <code>null</code> if the ParseField has not been completely\n+ * deprecated.\n+ */\n public String getAllReplacedWith() {\n return allReplacedWith;\n }\n \n+ /**\n+ * @return an array of the names for the {@link ParseField} which are\n+ * deprecated.\n+ */\n public String[] getDeprecatedNames() {\n return deprecatedNames;\n }", "filename": "core/src/main/java/org/elasticsearch/common/ParseField.java", "status": "modified" }, { "diff": "@@ -91,7 +91,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ\n public static final ParseField SORT_FIELD = new ParseField(\"sort\");\n public static final ParseField TRACK_SCORES_FIELD = new ParseField(\"track_scores\");\n public static final ParseField INDICES_BOOST_FIELD = new ParseField(\"indices_boost\");\n- public static final ParseField AGGREGATIONS_FIELD = new ParseField(\"aggregations\", \"aggs\");\n+ public static final ParseField AGGREGATIONS_FIELD = new ParseField(\"aggregations\", new String[] { \"aggs\" }, Strings.EMPTY_ARRAY);\n public static final ParseField HIGHLIGHT_FIELD = new ParseField(\"highlight\");\n public static final ParseField SUGGEST_FIELD = new ParseField(\"suggest\");\n public static final ParseField RESCORE_FIELD = new ParseField(\"rescore\");\n@@ -998,7 +998,7 @@ public void parseXContent(QueryParseContext context, AggregatorParsers aggParser\n scriptFields.add(new ScriptField(context));\n }\n } else if (context.getParseFieldMatcher().match(currentFieldName, INDICES_BOOST_FIELD)) {\n- indexBoost = new ObjectFloatHashMap<String>();\n+ indexBoost = new ObjectFloatHashMap<>();\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {\n currentFieldName = parser.currentName();", "filename": "core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java", "status": "modified" }, { "diff": "@@ -62,6 +62,54 @@ public void testParse() {\n }\n }\n \n+ public void testParseWithAlternatives() {\n+ String name = \"foo_bar\";\n+ String[] alternatives = new String[] { \"bazFoo\", \"baz_foo\", \"Foobaz\" };\n+ ParseField field = new ParseField(name, alternatives, Strings.EMPTY_ARRAY);\n+ String[] deprecated = new String[] { \"barFoo\", \"bar_foo\", \"Foobar\" };\n+ ParseField withDeprecations = field.withDeprecation(deprecated);\n+ assertThat(field, 
not(sameInstance(withDeprecations)));\n+ assertThat(field.match(name, false), is(true));\n+ assertThat(field.match(\"foo bar\", false), is(false));\n+ for (String alternativeName : alternatives) {\n+ assertThat(field.match(alternativeName, false), is(true));\n+ }\n+ for (String deprecatedName : deprecated) {\n+ assertThat(field.match(deprecatedName, false), is(false));\n+ }\n+\n+ assertThat(withDeprecations.match(name, false), is(true));\n+ assertThat(withDeprecations.match(\"foo bar\", false), is(false));\n+ for (String alternativeName : alternatives) {\n+ assertThat(withDeprecations.match(alternativeName, false), is(true));\n+ }\n+ for (String deprecatedName : deprecated) {\n+ assertThat(withDeprecations.match(deprecatedName, false), is(true));\n+ }\n+\n+ // now with strict mode\n+ assertThat(field.match(name, true), is(true));\n+ assertThat(field.match(\"foo bar\", true), is(false));\n+ for (String alternativeName : alternatives) {\n+ assertThat(field.match(alternativeName, true), is(true));\n+ }\n+ for (String deprecatedName : deprecated) {\n+ assertThat(field.match(deprecatedName, true), is(false));\n+ }\n+\n+ assertThat(withDeprecations.match(name, true), is(true));\n+ assertThat(withDeprecations.match(\"foo bar\", true), is(false));\n+ for (String alternativeName : alternatives) {\n+ assertThat(field.match(alternativeName, true), is(true));\n+ }\n+ for (String deprecatedName : deprecated) {\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {\n+ withDeprecations.match(deprecatedName, true);\n+ });\n+ assertThat(e.getMessage(), containsString(\"used, expected [foo_bar] instead\"));\n+ }\n+ }\n+\n public void testAllDeprecated() {\n String name = \"like_text\";\n \n@@ -93,11 +141,49 @@ public void testAllDeprecated() {\n assertThat(e.getMessage(), containsString(\" used, replaced by [like]\"));\n }\n \n+ public void testAllDeprecatedWithAlternatives() {\n+ String name = \"like_text\";\n+ String[] alternatives = new String[] { \"alt_name\", \"another_name\" };\n+ boolean withDeprecatedNames = randomBoolean();\n+ String[] deprecated = new String[] { \"text\", \"same_as_text\" };\n+ String[] allValues;\n+ if (withDeprecatedNames) {\n+ String[] newArray = new String[1 + alternatives.length + deprecated.length];\n+ newArray[0] = name;\n+ System.arraycopy(alternatives, 0, newArray, 1, alternatives.length);\n+ System.arraycopy(deprecated, 0, newArray, 1 + alternatives.length, deprecated.length);\n+ allValues = newArray;\n+ } else {\n+ String[] newArray = new String[1 + alternatives.length];\n+ newArray[0] = name;\n+ System.arraycopy(alternatives, 0, newArray, 1, alternatives.length);\n+ allValues = newArray;\n+ }\n+\n+ ParseField field;\n+ if (withDeprecatedNames) {\n+ field = new ParseField(name, alternatives, Strings.EMPTY_ARRAY).withDeprecation(deprecated).withAllDeprecated(\"like\");\n+ } else {\n+ field = new ParseField(name, alternatives, Strings.EMPTY_ARRAY).withAllDeprecated(\"like\");\n+ }\n+\n+ // strict mode off\n+ assertThat(field.match(randomFrom(allValues), false), is(true));\n+ assertThat(field.match(\"not a field name\", false), is(false));\n+\n+ // now with strict mode\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> field.match(randomFrom(allValues), true));\n+ assertThat(e.getMessage(), containsString(\" used, replaced by [like]\"));\n+ }\n+\n public void testGetAllNamesIncludedDeprecated() {\n ParseField parseField = new ParseField(\"terms\", \"in\");\n assertThat(parseField.getAllNamesIncludedDeprecated(), 
arrayContainingInAnyOrder(\"terms\", \"in\"));\n \n parseField = new ParseField(\"more_like_this\", \"mlt\");\n assertThat(parseField.getAllNamesIncludedDeprecated(), arrayContainingInAnyOrder(\"more_like_this\", \"mlt\"));\n+\n+ parseField = new ParseField(\"foo\", new String[] { \"bar\" }, \"baz\");\n+ assertThat(parseField.getAllNamesIncludedDeprecated(), arrayContainingInAnyOrder(\"foo\", \"bar\", \"baz\"));\n }\n }", "filename": "core/src/test/java/org/elasticsearch/common/ParseFieldTests.java", "status": "modified" } ] }
{ "body": "In the lack of tests the analyzer.alias feature was pretty much not working\nat all on current master. Issues like #19163 showed some serious problems for users\nusing this feature upgrading to an alpha version.\nThis change fixes the processing order and allows aliases to be set for\nexisting analyzers like `default`. This change also ensures that if `default`\nis aliased the correct analyzer is used for `default_search` etc.\n\nCloses #19163\n", "comments": [ { "body": "I know it has existed for a while, but I never knew about the feature until now. What problem does it solve? It seems to add indirection without any benefit (an alias should not be able to be changed, or it would mean an analyzer could be swapped out, which would completely break existing mappings using it).\n", "created_at": "2016-07-19T21:52:56Z" }, { "body": "> I know it has existed for a while, but I never knew about the feature until now. What problem does it solve? It seems to add indirection without any benefit (an alias should not be able to be changed, or it would mean an analyzer could be swapped out, which would completely break existing mappings using it).\n\nit does - I am going to open an issue to remove it but I first need to find a good way for BWC. Until then I want it at least to work without relying on hash map iteration order\n", "created_at": "2016-07-20T07:15:45Z" }, { "body": "@jpountz I pushed new tests\n", "created_at": "2016-07-20T13:14:25Z" }, { "body": "LGTM\n", "created_at": "2016-07-20T14:07:44Z" }, { "body": "test this please\n", "created_at": "2016-07-20T20:33:55Z" } ], "number": 19506, "title": "Fix analyzer alias processing" }
{ "body": "The Ruby YAML parser ignores the `do` actions when they are not indented, making the test suite fail.\n\nCan you please review, @s1monw?\n\nRelated: #19506 \n", "number": 19529, "review_comments": [], "title": "Test: Fixed incorrect YAML indentation in the `indices.put_template/10_basic.yaml` test" }
{ "commits": [ { "message": "Test: Fixed incorrect YAML indentation in the `indices.put_template/10_basic.yaml` test\n\nThe Ruby YAML parser ignores the `do` actions when they are not indented,\nmaking the test suite fail.\n\nRelated: #19506\n\nCloses #19529" } ], "files": [ { "diff": "@@ -87,34 +87,34 @@\n index.analysis.analyzer.foobar_search.type: \"standard\"\n \n - do:\n- index:\n- index: test_index\n- type: test\n- body: { field: \"the quick brown fox\" }\n+ index:\n+ index: test_index\n+ type: test\n+ body: { field: \"the quick brown fox\" }\n \n - do:\n- indices.refresh:\n- index: test_index\n+ indices.refresh:\n+ index: test_index\n \n - do:\n- search:\n- index: test_index\n- type: test\n- body:\n- query:\n- term:\n- field: \"the quick brown fox\"\n+ search:\n+ index: test_index\n+ type: test\n+ body:\n+ query:\n+ term:\n+ field: \"the quick brown fox\"\n \n - match: {hits.total: 1}\n \n - do:\n- search:\n- index: test_index\n- type: test\n- body:\n- query:\n- match:\n- field: \"the quick brown fox\"\n+ search:\n+ index: test_index\n+ type: test\n+ body:\n+ query:\n+ match:\n+ field: \"the quick brown fox\"\n \n - match: {hits.total: 0}\n ", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yaml", "status": "modified" } ] }
{ "body": "According to the docs, the `http.cors.allow-methods` setting should default to `OPTIONS,HEAD,GET,POST,PUT,DELETE`, but it appears to default to no methods at all.\n\nWith the default settings:\n\n```\ncurl -H 'Origin: http://localhost:8000' -H 'Access-Control-Request-Method: PUT' -i -X OPTIONS localhost:9200/t/t/1 -d{}\n```\n\nreturns:\n\n```\nHTTP/1.1 200 OK\nAccess-Control-Allow-Origin: http://localhost:8000\nVary: Origin\nAccess-Control-Max-Age: 1728000\ndate: Wed, 20 Jul 2016 14:37:09 GMT\ncontent-length: 0\n```\n\nIf I set `http.cors.allow-methods` to `OPTIONS,HEAD,GET,POST,PUT,DELETE`, it returns:\n\n```\nHTTP/1.1 200 OK\nAccess-Control-Allow-Origin: http://localhost:8000\nVary: Origin\nAccess-Control-Allow-Methods: HEAD\nAccess-Control-Allow-Methods: DELETE\nAccess-Control-Allow-Methods: POST\nAccess-Control-Allow-Methods: GET\nAccess-Control-Allow-Methods: OPTIONS\nAccess-Control-Allow-Methods: PUT\nAccess-Control-Max-Age: 1728000\ndate: Wed, 20 Jul 2016 14:38:18 GMT\ncontent-length: 0\n```\n", "comments": [], "number": 19520, "title": "Cors Access-Request-Control-Headers ignoring defaults" }
{ "body": "Fixes CORS handling so that it uses the defaults for for http.cors.allow-methods\nand http.cors.allow-headers if none are specified in the config.\n\nCloses #19520\n", "number": 19522, "review_comments": [ { "body": "can you just call spliStringToSet with true? why do we need another variant when the method this calls is public\n", "created_at": "2016-07-20T17:25:40Z" }, { "body": "actually, i see 9 uses of the original method, and none look like they would want to retain whitespace. i think we should just change the behavior?\n", "created_at": "2016-07-20T17:34:41Z" }, { "body": "yeah i wasn't sure what was best, most uses are reading rest request params which shouldn't have the whitespace anyway. i'm up for changing it to always do trim (unless that's the delimiter char) and not have that option on the method. that would make my extra wrapper method even more unnecessary and could go away.\n", "created_at": "2016-07-20T17:36:59Z" }, { "body": "Instead of this, I think you could do just like with start, and have an `end` var that marks the current end of the token, and do not increment it when you find whitespace?\n", "created_at": "2016-07-22T16:08:40Z" }, { "body": "This means the size of the token would always be `end - start` too.\n", "created_at": "2016-07-22T16:09:25Z" }, { "body": "I pushed https://github.com/elastic/elasticsearch/pull/19522/commits/34c5b1ea13aa3ecfa5d2d08140401ad7e196f482\n", "created_at": "2016-07-22T16:21:58Z" } ], "title": "Fixes CORS handling so that it uses the defaults" }
{ "commits": [ { "message": "Fixes CORS handling so that it uses the defaults for http.cors.allow-methods\nand http.cors.allow-headers if none are specified in the config.\n\nCloses #19520" }, { "message": "Make trimming the default for Strings#splitStringsToSet" }, { "message": "Fix the Strings.splitStringToSet method so it trims whitespace from\nthe beginning and end of all split strings." }, { "message": "Improve splitting method" } ], "files": [ { "diff": "@@ -33,6 +33,7 @@\n import java.util.ArrayList;\n import java.util.Arrays;\n import java.util.Collection;\n+import java.util.Collections;\n import java.util.HashSet;\n import java.util.Iterator;\n import java.util.LinkedList;\n@@ -509,7 +510,19 @@ public static String[] splitStringByCommaToArray(final String s) {\n else return s.split(\",\");\n }\n \n+ /**\n+ * A convenience method for splitting a delimited string into\n+ * a set and trimming leading and trailing whitespace from all\n+ * split strings.\n+ *\n+ * @param s the string to split\n+ * @param c the delimiter to split on\n+ * @return the set of split strings\n+ */\n public static Set<String> splitStringToSet(final String s, final char c) {\n+ if (s == null || s.isEmpty()) {\n+ return Collections.emptySet();\n+ }\n final char[] chars = s.toCharArray();\n int count = 1;\n for (final char x : chars) {\n@@ -521,16 +534,25 @@ public static Set<String> splitStringToSet(final String s, final char c) {\n final int len = chars.length;\n int start = 0; // starting index in chars of the current substring.\n int pos = 0; // current index in chars.\n+ int end = 0; // the position of the end of the current token\n for (; pos < len; pos++) {\n if (chars[pos] == c) {\n- int size = pos - start;\n+ int size = end - start;\n if (size > 0) { // only add non empty strings\n result.add(new String(chars, start, size));\n }\n start = pos + 1;\n+ end = start;\n+ } else if (Character.isWhitespace(chars[pos])) {\n+ if (start == pos) {\n+ // skip over preceding whitespace\n+ start++;\n+ }\n+ } else {\n+ end = pos + 1;\n }\n }\n- int size = pos - start;\n+ int size = end - start;\n if (size > 0) {\n result.add(new String(chars, start, size));\n }", "filename": "core/src/main/java/org/elasticsearch/common/Strings.java", "status": "modified" }, { "diff": "@@ -40,9 +40,9 @@ public final class HttpTransportSettings {\n public static final Setting<Integer> SETTING_CORS_MAX_AGE =\n Setting.intSetting(\"http.cors.max-age\", 1728000, Property.NodeScope);\n public static final Setting<String> SETTING_CORS_ALLOW_METHODS =\n- new Setting<>(\"http.cors.allow-methods\", \"OPTIONS, HEAD, GET, POST, PUT, DELETE\", (value) -> value, Property.NodeScope);\n+ new Setting<>(\"http.cors.allow-methods\", \"OPTIONS,HEAD,GET,POST,PUT,DELETE\", (value) -> value, Property.NodeScope);\n public static final Setting<String> SETTING_CORS_ALLOW_HEADERS =\n- new Setting<>(\"http.cors.allow-headers\", \"X-Requested-With, Content-Type, Content-Length\", (value) -> value, Property.NodeScope);\n+ new Setting<>(\"http.cors.allow-headers\", \"X-Requested-With,Content-Type,Content-Length\", (value) -> value, Property.NodeScope);\n public static final Setting<Boolean> SETTING_CORS_ALLOW_CREDENTIALS =\n Setting.boolSetting(\"http.cors.allow-credentials\", false, Property.NodeScope);\n public static final Setting<Boolean> SETTING_PIPELINING =", "filename": "core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java", "status": "modified" }, { "diff": "@@ -19,6 +19,7 @@\n \n package org.elasticsearch.common;\n \n+import 
org.elasticsearch.common.util.set.Sets;\n import org.elasticsearch.common.xcontent.ToXContent;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.test.ESTestCase;\n@@ -73,4 +74,32 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n assertThat(toString, containsString(\"\\\"ok\\\":\\\"here\\\"\"));\n assertThat(toString, containsString(\"\\\"catastrophe\\\":\\\"\\\"\"));\n }\n+\n+ public void testSplitStringToSet() {\n+ assertEquals(Strings.splitStringByCommaToSet(null), Sets.newHashSet());\n+ assertEquals(Strings.splitStringByCommaToSet(\"\"), Sets.newHashSet());\n+ assertEquals(Strings.splitStringByCommaToSet(\"a,b,c\"), Sets.newHashSet(\"a\",\"b\",\"c\"));\n+ assertEquals(Strings.splitStringByCommaToSet(\"a, b, c\"), Sets.newHashSet(\"a\",\"b\",\"c\"));\n+ assertEquals(Strings.splitStringByCommaToSet(\" a , b, c \"), Sets.newHashSet(\"a\",\"b\",\"c\"));\n+ assertEquals(Strings.splitStringByCommaToSet(\"aa, bb, cc\"), Sets.newHashSet(\"aa\",\"bb\",\"cc\"));\n+ assertEquals(Strings.splitStringByCommaToSet(\" a \"), Sets.newHashSet(\"a\"));\n+ assertEquals(Strings.splitStringByCommaToSet(\" a \"), Sets.newHashSet(\"a\"));\n+ assertEquals(Strings.splitStringByCommaToSet(\" aa \"), Sets.newHashSet(\"aa\"));\n+ assertEquals(Strings.splitStringByCommaToSet(\" \"), Sets.newHashSet());\n+\n+ assertEquals(Strings.splitStringToSet(null, ' '), Sets.newHashSet());\n+ assertEquals(Strings.splitStringToSet(\"\", ' '), Sets.newHashSet());\n+ assertEquals(Strings.splitStringToSet(\"a b c\", ' '), Sets.newHashSet(\"a\",\"b\",\"c\"));\n+ assertEquals(Strings.splitStringToSet(\"a, b, c\", ' '), Sets.newHashSet(\"a,\",\"b,\",\"c\"));\n+ assertEquals(Strings.splitStringToSet(\" a b c \", ' '), Sets.newHashSet(\"a\",\"b\",\"c\"));\n+ assertEquals(Strings.splitStringToSet(\" a b c \", ' '), Sets.newHashSet(\"a\",\"b\",\"c\"));\n+ assertEquals(Strings.splitStringToSet(\"aa bb cc\", ' '), Sets.newHashSet(\"aa\",\"bb\",\"cc\"));\n+ assertEquals(Strings.splitStringToSet(\" a \", ' '), Sets.newHashSet(\"a\"));\n+ assertEquals(Strings.splitStringToSet(\" a \", ' '), Sets.newHashSet(\"a\"));\n+ assertEquals(Strings.splitStringToSet(\" a \", ' '), Sets.newHashSet(\"a\"));\n+ assertEquals(Strings.splitStringToSet(\"a \", ' '), Sets.newHashSet(\"a\"));\n+ assertEquals(Strings.splitStringToSet(\" aa \", ' '), Sets.newHashSet(\"aa\"));\n+ assertEquals(Strings.splitStringToSet(\"aa \", ' '), Sets.newHashSet(\"aa\"));\n+ assertEquals(Strings.splitStringToSet(\" \", ' '), Sets.newHashSet());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/common/StringsTests.java", "status": "modified" }, { "diff": "@@ -81,9 +81,11 @@\n import java.util.ArrayList;\n import java.util.Arrays;\n import java.util.List;\n+import java.util.Set;\n import java.util.concurrent.Executors;\n import java.util.concurrent.atomic.AtomicReference;\n import java.util.regex.Pattern;\n+import java.util.stream.Collectors;\n \n import static org.elasticsearch.common.settings.Setting.boolSetting;\n import static org.elasticsearch.common.settings.Setting.byteSizeSetting;\n@@ -390,14 +392,10 @@ private Netty3CorsConfig buildCorsConfig(Settings settings) {\n if (SETTING_CORS_ALLOW_CREDENTIALS.get(settings)) {\n builder.allowCredentials();\n }\n- String[] strMethods = settings.getAsArray(SETTING_CORS_ALLOW_METHODS.getKey());\n- HttpMethod[] methods = Arrays.asList(strMethods)\n- .stream()\n- .map(HttpMethod::valueOf)\n- .toArray(size -> new HttpMethod[size]);\n- return 
builder.allowedRequestMethods(methods)\n+ Set<String> strMethods = Strings.splitStringByCommaToSet(SETTING_CORS_ALLOW_METHODS.get(settings));\n+ return builder.allowedRequestMethods(strMethods.stream().map(HttpMethod::valueOf).collect(Collectors.toSet()))\n .maxAge(SETTING_CORS_MAX_AGE.get(settings))\n- .allowedRequestHeaders(settings.getAsArray(SETTING_CORS_ALLOW_HEADERS.getKey()))\n+ .allowedRequestHeaders(Strings.splitStringByCommaToSet(SETTING_CORS_ALLOW_HEADERS.get(settings)))\n .shortCircuit()\n .build();\n }", "filename": "modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java", "status": "modified" }, { "diff": "@@ -193,8 +193,8 @@ public Netty3CorsConfigBuilder maxAge(final long max) {\n * @param methods the {@link HttpMethod}s that should be allowed.\n * @return {@link Netty3CorsConfigBuilder} to support method chaining.\n */\n- public Netty3CorsConfigBuilder allowedRequestMethods(final HttpMethod... methods) {\n- requestMethods.addAll(Arrays.asList(methods));\n+ public Netty3CorsConfigBuilder allowedRequestMethods(final Set<HttpMethod> methods) {\n+ requestMethods.addAll(methods);\n return this;\n }\n \n@@ -214,8 +214,8 @@ public Netty3CorsConfigBuilder allowedRequestMethods(final HttpMethod... methods\n * @param headers the headers to be added to the preflight 'Access-Control-Allow-Headers' response header.\n * @return {@link Netty3CorsConfigBuilder} to support method chaining.\n */\n- public Netty3CorsConfigBuilder allowedRequestHeaders(final String... headers) {\n- requestHeaders.addAll(Arrays.asList(headers));\n+ public Netty3CorsConfigBuilder allowedRequestHeaders(final Set<String> headers) {\n+ requestHeaders.addAll(headers);\n return this;\n }\n ", "filename": "modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/cors/Netty3CorsConfigBuilder.java", "status": "modified" }, { "diff": "@@ -23,6 +23,7 @@\n import org.elasticsearch.common.network.NetworkService;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.util.MockBigArrays;\n+import org.elasticsearch.common.util.set.Sets;\n import org.elasticsearch.http.netty3.cors.Netty3CorsConfig;\n import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;\n import org.elasticsearch.test.ESTestCase;\n@@ -86,4 +87,19 @@ public void testCorsConfig() {\n assertThat(corsConfig.allowedRequestMethods().stream().map(HttpMethod::getName).collect(Collectors.toSet()), equalTo(methods));\n transport.close();\n }\n+\n+ public void testCorsConfigDefaults() {\n+ final Set<String> headers = Sets.newHashSet(\"X-Requested-With\", \"Content-Type\", \"Content-Length\");\n+ final Set<String> methods = Sets.newHashSet(\"OPTIONS\", \"HEAD\", \"GET\", \"POST\", \"PUT\", \"DELETE\");\n+ final Settings settings = Settings.builder()\n+ .put(SETTING_CORS_ENABLED.getKey(), true)\n+ .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), \"*\")\n+ .put(SETTING_CORS_ALLOW_CREDENTIALS.getKey(), true)\n+ .build();\n+ final Netty3HttpServerTransport transport = new Netty3HttpServerTransport(settings, networkService, bigArrays, threadPool);\n+ final Netty3CorsConfig corsConfig = transport.getCorsConfig();\n+ assertThat(corsConfig.allowedRequestHeaders(), equalTo(headers));\n+ assertThat(corsConfig.allowedRequestMethods().stream().map(HttpMethod::getName).collect(Collectors.toSet()), equalTo(methods));\n+ transport.close();\n+ }\n }", "filename": "modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpServerTransportTests.java", "status": "modified" } ] }
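The substance of the fix above is that comma-separated defaults such as `OPTIONS,HEAD,GET,POST,PUT,DELETE` must expand into individual values with surrounding whitespace trimmed and empty tokens dropped. The sketch below mirrors that behaviour with plain JDK calls instead of Elasticsearch's hand-rolled character loop; the class and method names are made up, and the sample strings come from the settings and tests in the diff.

```java
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.regex.Pattern;

// Simplified stand-in for the trimming split added in the PR: split on a
// delimiter, trim each token, and drop empty tokens.
public class SplitSketch {
    static Set<String> splitToSet(String s, char delimiter) {
        Set<String> result = new LinkedHashSet<>();
        if (s == null || s.isEmpty()) {
            return result;
        }
        for (String token : s.split(Pattern.quote(String.valueOf(delimiter)))) {
            String trimmed = token.trim();
            if (!trimmed.isEmpty()) {
                result.add(trimmed);
            }
        }
        return result;
    }

    public static void main(String[] args) {
        // With or without spaces after the commas, the CORS default expands to the same set.
        System.out.println(splitToSet("OPTIONS,HEAD,GET,POST,PUT,DELETE", ','));
        System.out.println(splitToSet("OPTIONS, HEAD, GET, POST, PUT, DELETE", ','));
    }
}
```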
{ "body": "This one originally came up in https://discuss.elastic.co/t/the-auto-generate-phrase-queries-parameter-of-query-string-query-was-renamed-to-auto-generated-phrase-queries/55934.\n", "comments": [], "number": 19512, "title": "auto_generate_phrase_queries accidentally renamed to auto_generated_phrase_queries" }
{ "body": "During query refactoring the query string query parameter\n'auto_generate_phrase_queries' was accidentally renamed\nto 'auto_generated_phrase_queries'.\n\nWith this commit we restore the old name.\n\nCloses #19512\n", "number": 19514, "review_comments": [], "title": "Restore parameter name auto_generate_phrase_queries" }
{ "commits": [ { "message": "Restore parameter name auto_generate_phrase_queries\n\nDuring query refactoring the query string query parameter\n'auto_generate_phrase_queries' was accidentally renamed\nto 'auto_generated_phrase_queries'.\n\nWith this commit we restore the old name.\n\nCloses #19512" } ], "files": [ { "diff": "@@ -82,7 +82,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue\n private static final ParseField ANALYZER_FIELD = new ParseField(\"analyzer\");\n private static final ParseField QUOTE_ANALYZER_FIELD = new ParseField(\"quote_analyzer\");\n private static final ParseField ALLOW_LEADING_WILDCARD_FIELD = new ParseField(\"allow_leading_wildcard\");\n- private static final ParseField AUTO_GENERATED_PHRASE_QUERIES_FIELD = new ParseField(\"auto_generated_phrase_queries\");\n+ private static final ParseField AUTO_GENERATE_PHRASE_QUERIES_FIELD = new ParseField(\"auto_generate_phrase_queries\");\n private static final ParseField MAX_DETERMINED_STATES_FIELD = new ParseField(\"max_determined_states\");\n private static final ParseField LOWERCASE_EXPANDED_TERMS_FIELD = new ParseField(\"lowercase_expanded_terms\");\n private static final ParseField ENABLE_POSITION_INCREMENTS_FIELD = new ParseField(\"enable_position_increment\");\n@@ -594,7 +594,7 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep\n if (this.quoteAnalyzer != null) {\n builder.field(QUOTE_ANALYZER_FIELD.getPreferredName(), this.quoteAnalyzer);\n }\n- builder.field(AUTO_GENERATED_PHRASE_QUERIES_FIELD.getPreferredName(), this.autoGeneratePhraseQueries);\n+ builder.field(AUTO_GENERATE_PHRASE_QUERIES_FIELD.getPreferredName(), this.autoGeneratePhraseQueries);\n builder.field(MAX_DETERMINED_STATES_FIELD.getPreferredName(), this.maxDeterminizedStates);\n if (this.allowLeadingWildcard != null) {\n builder.field(ALLOW_LEADING_WILDCARD_FIELD.getPreferredName(), this.allowLeadingWildcard);\n@@ -704,7 +704,7 @@ public static Optional<QueryStringQueryBuilder> fromXContent(QueryParseContext p\n quoteAnalyzer = parser.text();\n } else if (parseContext.getParseFieldMatcher().match(currentFieldName, ALLOW_LEADING_WILDCARD_FIELD)) {\n allowLeadingWildcard = parser.booleanValue();\n- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AUTO_GENERATED_PHRASE_QUERIES_FIELD)) {\n+ } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AUTO_GENERATE_PHRASE_QUERIES_FIELD)) {\n autoGeneratePhraseQueries = parser.booleanValue();\n } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_DETERMINED_STATES_FIELD)) {\n maxDeterminizedStates = parser.intValue();", "filename": "core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java", "status": "modified" }, { "diff": "@@ -520,7 +520,7 @@ public void testFromJson() throws IOException {\n \" \\\"use_dis_max\\\" : true,\\n\" +\n \" \\\"tie_breaker\\\" : 0.0,\\n\" +\n \" \\\"default_operator\\\" : \\\"or\\\",\\n\" +\n- \" \\\"auto_generated_phrase_queries\\\" : false,\\n\" +\n+ \" \\\"auto_generate_phrase_queries\\\" : false,\\n\" +\n \" \\\"max_determined_states\\\" : 10000,\\n\" +\n \" \\\"lowercase_expanded_terms\\\" : true,\\n\" +\n \" \\\"enable_position_increment\\\" : true,\\n\" +", "filename": "core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: 2.3.2\n\n**JVM version**: openjdk version \"1.8.0_91\"\n OpenJDK Runtime Environment (build 1.8.0_91-b14)\n OpenJDK 64-Bit Server VM (build 25.91-b14, mixed mode)\n\n**OS version**: Red Hat Enterprise Linux Server release 7.2 (Maipo)\n\n**Description of the problem including expected versus actual behavior**:\nCan't provide two access & secret keys (different for ec2 and s3).\nI'd like to use cloud-aws plugin for EC2 Discovery & S3 Repository (compatible service).\nI've got separate cloud accounts for Eucalyptus & RiakS3.\n\n**Steps to reproduce**:\n1) Add to elasticsearch.yml:\n\n```\ncloud:\n aws:\n s3:\n access_key: xxxxxxxxxxxx\n secret_key: XXxxxxxXxxxxXxxxXxXxXxxx\n endpoint: s3_cloud_front.address.here\n protocol: http\n signer: S3SignerType\n ec2:\n access_key: yyyyyyyyyyyyy\n secret_key: yyyYyyyyyYyyyyYyyyYyYyY\n endpoint: ec2_cloud_front.address.here\n protocol: http\n\ndiscovery:\n type: ec2\n ec2:\n groups: group_name\n```\n\n2) # /etc/init.d/elasticsearch restart\n\n**Provide logs (if relevant)**:\n`[2016-07-14 15:28:17,690][INFO ][discovery.ec2 ] [Hideko Takata] Exception while retrieving instance list from AWS API: Unable to load AWS credentials from any provider in the chain`\n", "comments": [ { "body": "It's a bug to me. I need to reproduce it.\n", "created_at": "2016-07-15T13:49:13Z" }, { "body": "I read the code today and that's indeed a bug in 2.x series. \n\nWe never read values for [EC2](https://github.com/elastic/elasticsearch/blob/2.4/plugins/cloud-aws/src/main/java/org/elasticsearch/cloud/aws/AwsEc2Service.java#L27-L27) but we do for [S3](https://github.com/elastic/elasticsearch/blob/2.4/plugins/cloud-aws/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java#L65-L68).\n\nI'm pretty sure it's ok for 5.0 (I still need to check it though).\nI'm starting working on a fix for 2.x series.\n\nAs a workaround, I think you could use something like:\n\n``` yml\ncloud:\n aws:\n access_key: yyyyyyyyyyyyy\n secret_key: yyyYyyyyyYyyyyYyyyYyYyY\n s3:\n access_key: xxxxxxxxxxxx\n secret_key: XXxxxxxXxxxxXxxxXxXxXxxx\n endpoint: s3_cloud_front.address.here\n protocol: http\n signer: S3SignerType\n ec2:\n endpoint: ec2_cloud_front.address.here\n protocol: http\n\ndiscovery:\n type: ec2\n ec2:\n groups: group_name\n```\n\nec2 will use the \"global\" AWS access key but S3 will use the S3 specific one.\n", "created_at": "2016-07-20T08:11:55Z" }, { "body": "Closed by #19513 in 2.4 branch.\n", "created_at": "2016-07-21T12:03:51Z" } ], "number": 19445, "title": "cloud-aws different keys for EC2 & S3" }
{ "body": "We have a bug described in #19445 where a user wants to define specific key/secret for EC2 and S3:\n\n```\ncloud:\n aws:\n s3:\n access_key: xxxxxxxxxxxx\n secret_key: XXxxxxxXxxxxXxxxXxXxXxxx\n ec2:\n access_key: yyyyyyyyyyyyy\n secret_key: yyyYyyyyyYyyyyYyyyYyYyY\n```\n\nThis is documented and should work.\n\nThis commit fixes that and adds a test.\n\nBTW, while working on this issue, I discovered that the removal effort made in #12978 has been reintroduced by mistake in fe7421986c71851ac9689b1ab9feaed618a7064a. So I removed this again.\n\nCloses #19445\n", "number": 19513, "review_comments": [], "title": "Support specific key/secret for EC2" }
{ "commits": [ { "message": "Support specific key/secret for EC2\n\nWe have a bug described in #19445 where a user wants to define specific key/secret for EC2 and S3:\n\n```\ncloud:\n aws:\n s3:\n access_key: xxxxxxxxxxxx\n secret_key: XXxxxxxXxxxxXxxxXxXxXxxx\n ec2:\n access_key: yyyyyyyyyyyyy\n secret_key: yyyYyyyyyYyyyyYyyyYyYyY\n```\n\nThis is documented and should work.\n\nThis commit fixes that and adds a test.\n\nBTW, while working on this issue, I discovered that the removal effort made in #12978 has been reintroduced by mistake in fe7421986c71851ac9689b1ab9feaed618a7064a. So I removed this again.\n\nCloses #19445" } ], "files": [ { "diff": "@@ -23,13 +23,17 @@\n import com.amazonaws.AmazonWebServiceRequest;\n import com.amazonaws.ClientConfiguration;\n import com.amazonaws.Protocol;\n-import com.amazonaws.auth.*;\n+import com.amazonaws.auth.AWSCredentialsProvider;\n+import com.amazonaws.auth.AWSCredentialsProviderChain;\n+import com.amazonaws.auth.BasicAWSCredentials;\n+import com.amazonaws.auth.EnvironmentVariableCredentialsProvider;\n+import com.amazonaws.auth.InstanceProfileCredentialsProvider;\n+import com.amazonaws.auth.SystemPropertiesCredentialsProvider;\n import com.amazonaws.internal.StaticCredentialsProvider;\n import com.amazonaws.retry.RetryPolicy;\n import com.amazonaws.services.ec2.AmazonEC2;\n import com.amazonaws.services.ec2.AmazonEC2Client;\n import org.elasticsearch.ElasticsearchException;\n-import org.elasticsearch.cloud.aws.AwsService.CLOUD;\n import org.elasticsearch.cloud.aws.AwsService.CLOUD_AWS;\n import org.elasticsearch.cloud.aws.network.Ec2NameResolver;\n import org.elasticsearch.cloud.aws.node.Ec2CustomNodeAttributes;\n@@ -56,8 +60,6 @@ public class AwsEc2ServiceImpl extends AbstractLifecycleComponent<AwsEc2Service>\n public AwsEc2ServiceImpl(Settings settings, SettingsFilter settingsFilter, NetworkService networkService, DiscoveryNodeService discoveryNodeService) {\n super(settings);\n // Filter global settings\n- settingsFilter.addFilter(CLOUD.KEY);\n- settingsFilter.addFilter(CLOUD.ACCOUNT);\n settingsFilter.addFilter(CLOUD_AWS.KEY);\n settingsFilter.addFilter(CLOUD_AWS.SECRET);\n settingsFilter.addFilter(CLOUD_AWS.PROXY_PASSWORD);\n@@ -93,8 +95,6 @@ public synchronized AmazonEC2 client() {\n } else {\n throw new IllegalArgumentException(\"No protocol supported [\" + protocol + \"], can either be [http] or [https]\");\n }\n- String account = settings.get(CLOUD_AWS.KEY, settings.get(CLOUD.ACCOUNT));\n- String key = settings.get(CLOUD_AWS.SECRET, settings.get(CLOUD.KEY));\n \n String proxyHost = settings.get(CLOUD_AWS.PROXY_HOST, settings.get(CLOUD_AWS.DEPRECATED_PROXY_HOST));\n proxyHost = settings.get(CLOUD_EC2.PROXY_HOST, settings.get(CLOUD_EC2.DEPRECATED_PROXY_HOST, proxyHost));\n@@ -147,21 +147,7 @@ public long delayBeforeNextRetry(AmazonWebServiceRequest originalRequest,\n false);\n clientConfiguration.setRetryPolicy(retryPolicy);\n \n- AWSCredentialsProvider credentials;\n-\n- if (account == null && key == null) {\n- credentials = new AWSCredentialsProviderChain(\n- new EnvironmentVariableCredentialsProvider(),\n- new SystemPropertiesCredentialsProvider(),\n- new InstanceProfileCredentialsProvider()\n- );\n- } else {\n- credentials = new AWSCredentialsProviderChain(\n- new StaticCredentialsProvider(new BasicAWSCredentials(account, key))\n- );\n- }\n-\n- this.client = new AmazonEC2Client(credentials, clientConfiguration);\n+ this.client = new AmazonEC2Client(buildCredentials(settings), clientConfiguration);\n \n if 
(settings.get(CLOUD_EC2.ENDPOINT) != null) {\n String endpoint = settings.get(CLOUD_EC2.ENDPOINT);\n@@ -204,6 +190,22 @@ public long delayBeforeNextRetry(AmazonWebServiceRequest originalRequest,\n return this.client;\n }\n \n+ public static AWSCredentialsProvider buildCredentials(Settings settings) {\n+ String account = settings.get(CLOUD_EC2.KEY, settings.get(CLOUD_AWS.KEY));\n+ String key = settings.get(CLOUD_EC2.SECRET, settings.get(CLOUD_AWS.SECRET));\n+ if (account == null && key == null) {\n+ return new AWSCredentialsProviderChain(\n+ new EnvironmentVariableCredentialsProvider(),\n+ new SystemPropertiesCredentialsProvider(),\n+ new InstanceProfileCredentialsProvider()\n+ );\n+ }\n+\n+ return new AWSCredentialsProviderChain(\n+ new StaticCredentialsProvider(new BasicAWSCredentials(account, key))\n+ );\n+ }\n+\n @Override\n protected void doStart() throws ElasticsearchException {\n }", "filename": "plugins/cloud-aws/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java", "status": "modified" }, { "diff": "@@ -20,16 +20,6 @@\n package org.elasticsearch.cloud.aws;\n \n public interface AwsService {\n- /**\n- * Should be either moved to Core if this settings makes sense\n- * Or removed. See https://github.com/elastic/elasticsearch/issues/12809\n- */\n- @Deprecated\n- final class CLOUD {\n- public static final String KEY = \"cloud.key\";\n- public static final String ACCOUNT = \"cloud.account\";\n- }\n-\n final class CLOUD_AWS {\n public static final String KEY = \"cloud.aws.access_key\";\n public static final String SECRET = \"cloud.aws.secret_key\";", "filename": "plugins/cloud-aws/src/main/java/org/elasticsearch/cloud/aws/AwsService.java", "status": "modified" }, { "diff": "@@ -0,0 +1,61 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.discovery.ec2;\n+\n+import com.amazonaws.auth.AWSCredentialsProvider;\n+import com.amazonaws.services.ec2.AmazonEC2;\n+import org.elasticsearch.cloud.aws.AwsEc2Service;\n+import org.elasticsearch.cloud.aws.AwsEc2ServiceImpl;\n+import org.elasticsearch.cloud.aws.AwsModule;\n+import org.elasticsearch.cloud.aws.AwsService;\n+import org.elasticsearch.cluster.node.DiscoveryNodeService;\n+import org.elasticsearch.common.network.NetworkService;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.settings.SettingsFilter;\n+import org.elasticsearch.test.ESTestCase;\n+\n+import static org.hamcrest.Matchers.is;\n+\n+public class Ec2CredentialSettingsTests extends ESTestCase {\n+\n+ public void testAwsSettings() {\n+ Settings settings = Settings.builder()\n+ .put(AwsService.CLOUD_AWS.KEY, \"aws_key\")\n+ .put(AwsService.CLOUD_AWS.SECRET, \"aws_secret\")\n+ .build();\n+\n+ AWSCredentialsProvider credentials = AwsEc2ServiceImpl.buildCredentials(settings);\n+ assertThat(credentials.getCredentials().getAWSAccessKeyId(), is(\"aws_key\"));\n+ assertThat(credentials.getCredentials().getAWSSecretKey(), is(\"aws_secret\"));\n+ }\n+\n+ public void testEc2Settings() {\n+ Settings settings = Settings.builder()\n+ .put(AwsService.CLOUD_AWS.KEY, \"aws_key\")\n+ .put(AwsService.CLOUD_AWS.SECRET, \"aws_secret\")\n+ .put(AwsEc2Service.CLOUD_EC2.KEY, \"ec2_key\")\n+ .put(AwsEc2Service.CLOUD_EC2.SECRET, \"ec2_secret\")\n+ .build();\n+\n+ AWSCredentialsProvider credentials = AwsEc2ServiceImpl.buildCredentials(settings);\n+ assertThat(credentials.getCredentials().getAWSAccessKeyId(), is(\"ec2_key\"));\n+ assertThat(credentials.getCredentials().getAWSSecretKey(), is(\"ec2_secret\"));\n+ }\n+}", "filename": "plugins/cloud-aws/src/test/java/org/elasticsearch/discovery/ec2/Ec2CredentialSettingsTests.java", "status": "added" } ] }
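The credential fix above reduces to a lookup with a fallback: a service-specific key such as `cloud.aws.ec2.access_key` wins when present, otherwise the shared `cloud.aws.access_key` value is used. Below is a minimal standalone sketch of that precedence using plain maps; the key names come from the issue and the diff, the values are placeholders, and nothing here touches the real AWS SDK.

```java
import java.util.HashMap;
import java.util.Map;

// Standalone sketch of the settings fallback: a service-specific key
// (cloud.aws.ec2.*) wins, otherwise the shared cloud.aws.* key is used.
// Values here are placeholders, not real credentials.
public class CredentialFallbackSketch {
    static String get(Map<String, String> settings, String specificKey, String sharedKey) {
        String value = settings.get(specificKey);
        return value != null ? value : settings.get(sharedKey);
    }

    public static void main(String[] args) {
        Map<String, String> settings = new HashMap<>();
        settings.put("cloud.aws.access_key", "shared-key");
        settings.put("cloud.aws.secret_key", "shared-secret");
        settings.put("cloud.aws.ec2.access_key", "ec2-key");
        settings.put("cloud.aws.ec2.secret_key", "ec2-secret");

        // EC2 discovery picks up its own credentials...
        System.out.println(get(settings, "cloud.aws.ec2.access_key", "cloud.aws.access_key")); // ec2-key
        // ...while a service without specific settings falls back to the shared ones.
        System.out.println(get(settings, "cloud.aws.s3.access_key", "cloud.aws.access_key"));  // shared-key
    }
}
```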
{ "body": "<!--\nGitHub is reserved for bug reports and feature requests. The best place\nto ask a general question is at the Elastic Discourse forums at\nhttps://discuss.elastic.co. If you are in fact posting a bug report or\na feature request, please include one and only one of the below blocks\nin your new issue.\n-->\n\n<!--\nIf you are filing a bug report, please remove the below feature\nrequest block and provide responses for all of the below items.\n-->\n\n**Elasticsearch version**: 5.0.0-alpha3\n\n**JVM version**: 1.8.0_73-b02\n\n**OS version**: CentOS Linux release 7.2.1511\n\n**Description of the problem including expected versus actual behavior**:\nI'm trying to upgrade an ES plugin from 2.3 to 5.0.0-alpha3, and a template we previously had working is now causing an issue. The template attempts to set some default analyzers for an index, but when the index is created it throws an error saying there's already an analyzer with the default name. \n\nHere's the template I'm using:\n`{\n \"order\": 0, \n \"settings\": {\n \"index.analysis.analyzer.fairhairIndexAnalyzerv3.alias\": \"default\",\n \"index.analysis.analyzer.fairhairIndexAnalyzerv3.type\": \"fairhair-index-analyzer\",\n \"index.analysis.analyzer.fairhairTokenizingAnalyzer.type\": \"fairhair-tokenizing-analyzer\",\n \"index.analysis.analyzer.fairhairTokenizingAnalyzer.alias\": \"default_search\"\n },\n\"mappings\" : {\n //some mappings\n},\n\"template\" : \"document.*\"\n}`\n\n**Provide logs (if relevant)**:\nHere's the error stack trace I get:\n`elasticsearch_1 | [2016-06-28 23:40:43,871][DEBUG][action.admin.indices.create] [Iridia] [document.document-20151106] failed to create\nelasticsearch_1 | java.lang.IllegalStateException: already registered analyzer with name: default\nelasticsearch_1 | at org.elasticsearch.index.analysis.AnalysisService.<init>(AnalysisService.java:109)\nelasticsearch_1 | at org.elasticsearch.index.analysis.AnalysisRegistry.build(AnalysisRegistry.java:161)\nelasticsearch_1 | at org.elasticsearch.index.IndexService.<init>(IndexService.java:138)\nelasticsearch_1 | at org.elasticsearch.index.IndexModule.newIndexService(IndexModule.java:328)\nelasticsearch_1 | at org.elasticsearch.indices.IndicesService.createIndexService(IndicesService.java:398)\nelasticsearch_1 | at org.elasticsearch.indices.IndicesService.createIndex(IndicesService.java:363)\n`\n\nI've looked at the AnalysisService class constructor, and I see it does a check for analyzers with the name \"default\" in the passed in analyzer mappings. I believe these conditional statements fire:\n\n`if (!analyzerProviders.containsKey(\"default\")) {\n analyzerProviders.put(\"default\", new StandardAnalyzerProvider(indexSettings, null, \"default\", Settings.Builder.EMPTY_SETTINGS));\n }\n if (!analyzerProviders.containsKey(\"default_search\")) {\n analyzerProviders.put(\"default_search\", analyzerProviders.get(\"default\"));\n }\n if (!analyzerProviders.containsKey(\"default_search_quoted\")) {\n analyzerProviders.put(\"default_search_quoted\", analyzerProviders.get(\"default_search\"));\n }`\n\nSo it registers the standard analyzer, but while iterating through the passed in analyzers, this check also fires:\n\n`if (analyzers.containsKey(name)) { //Grabs the name from each entry in analyzer map\n throw new IllegalStateException(\"already registered analyzer with name: \" + name);\n }`\n\nI'm confused as to how both these conditions are firing. This check to see if the current map of analyzers already contains the key \"name\" was introduced in ES 5. 
\n\nEDIT: I know why the illegal state exception is being thrown. The Analysis Registry automatically adds the entry: \"default\" => StandardAnalyzer. So from what I can tell, it's actually impossible to register your own default index analyzer, since there will always be the standard analyzer. I think the check for whether a name is already in the analyzer mappings is a bug and should be removed. \n", "comments": [ { "body": "that is a bug - I think we should not register a \"default\" analyzer in our `AnalysisModule` since `AnalysisService` does it for us if there is no default specified. \n", "created_at": "2016-06-30T07:25:16Z" }, { "body": "I think throwing an illegal state exception when encountering a duplicate analyzer name should be removed. If user defined analyzers are deterministically placed into the analyzer mappings after the built in analyzers, then I think it makes sense to simply overwrite them. Maybe I'm not clear on how all this stuff get's wired up, but I'm actually a little confused as to how it's even possible to trigger duplicate analyzer named form within the constructor of AnalysisService. A Map of (analyzer name => analyzer) is passed into the constructor, which by definition has no duplicate keys. So how is it that while iterating through this map that we even encounter the same key twice? \n", "created_at": "2016-06-30T16:31:14Z" }, { "body": "> I think throwing an illegal state exception when encountering a duplicate analyzer name should be removed. If user defined analyzers are deterministically placed into the analyzer mappings after the built in analyzers, then I think it makes sense to simply overwrite them. Maybe I'm not clear on how all this stuff get's wired up, but I'm actually a little confused as to how it's even possible to trigger duplicate analyzer named form within the constructor of AnalysisService. A Map of (analyzer name => analyzer) is passed into the constructor, which by definition has no duplicate keys. So how is it that while iterating through this map that we even encounter the same key twice?\n\n-1 default is a special one that's fine but nobody should override `english` analyzer etc. ever, you can define your own that's fine\n", "created_at": "2016-06-30T17:02:37Z" }, { "body": "True. Could you enlighten me on how it's even possible to encounter duplicate keys from the passed in Map? I think I'm missing some crucial understanding of the issue. \n", "created_at": "2016-06-30T17:21:43Z" }, { "body": "Ah I found the issue. The initial mapping of analyzer providers that comes in will contain a default mapping to the standard analyzer provider. And although I have defined an alias for my custom analyzer to be the default analyzer, this alias isn't picked up until AFTER the initial check to see if \"default\" is in the provider mapping. When I was debugging ES I found that that my custom analyzer was the first to be iterated over. So the mapping of analyzers was first updated to include my analyzer, then the aliases of my custom analyzer were added to the mapping of analyzers. So after this first iteration, there was a mapping of \"default\" => CustomAnalyzer. When we finally iterate over the (default => standard analyzer provider) entry, that's where we hit the illegal state exception. 
So maybe to fix the issue, we move the initial check to see if \"default\" is missing from the passed in analyzer providers after we iterate through the providers.\n", "created_at": "2016-06-30T19:42:21Z" }, { "body": "what about just never register a default in the AnalysisModule - that should fix the problem\n", "created_at": "2016-06-30T21:02:39Z" }, { "body": "I tried doing that, and the default analyzer snuck in still via the PreBuiltAnalyzers. So I deleted the DEFAULT entry in the PrebuildAnalyzers enumeration, but it still snuck in because the AnalysisService does a check for it in the providers but doesn't look at the possible aliases. So I finally got around it by moving that check after the for loop. Because by then, all the aliases have been registered.\n", "created_at": "2016-06-30T21:23:19Z" }, { "body": "let's look at some code, can you open a PR?\n", "created_at": "2016-06-30T21:25:11Z" }, { "body": "Sure! \n", "created_at": "2016-06-30T21:26:57Z" }, { "body": "I added a testcase to master just to make sure it actually doesn't work but it does. I can reproduce your issue if I alias the default analyzer twice in the mapping, I wonder if you have an issue in your mapping that 5.x detects? Se my test here: https://github.com/elastic/elasticsearch/commit/1cb1373722b97a23f7f551e9827bbedea97b1424\n", "created_at": "2016-07-08T08:34:32Z" }, { "body": "Hmm interesting. I don't think there's anything wrong with my mapping. Here's the settings portion:\n\"settings\": {\n \"index.analysis.analyzer.fairhairIndexAnalyzerv3.alias\": \"default\",\n \"index.analysis.analyzer.fairhairIndexAnalyzerv3.type\": \"fairhair-index-analyzer\",\n \"index.analysis.analyzer.fairhairTokenizingAnalyzer.type\": \"fairhair-tokenizing-analyzer\",\n \"index.analysis.analyzer.fairhairTokenizingAnalyzer.alias\": \"default_search\"\n }\n", "created_at": "2016-07-08T16:05:59Z" }, { "body": "wait, what is `fairhair-index-analyzer` and where is is defined?\n", "created_at": "2016-07-08T19:08:41Z" }, { "body": "It's a custom analyzer defined in a plugin. It's registered with the AnalysisModule via the `onModule(AnalysisModule module)` method:\n\n`module.registerAnalyzer(INDEX_ANALYZER_NAME, FairhairIndexAnalyzerProvider::new);\n module.registerAnalyzer(TOKENIZING_ANALYZER_NAME, FairhairTokenizingAnalyzerProvider::new);`\n\nINDEX_ANALYZER_NAME = \"fairhair-index-analyzer\"\nTOKENIZING_ANALYZER_NAME = \"fairhair-tokenizing-analyzer\"\n", "created_at": "2016-07-08T19:50:04Z" }, { "body": "one more question, do you send settings with the create index request since your setting here come from a template?\n", "created_at": "2016-07-08T20:37:55Z" }, { "body": "The settings are being posted to ES once it's up and running. I'm able to see the template with `GET /_template/document-template.json` before there's any indices in ES. Once I try to post data to the index, that's when the \"failed to create index exception\" happens\n", "created_at": "2016-07-08T20:42:06Z" }, { "body": "@mitchswaldman can you provide a full re-creation of the issue like with curl statements and the bodies of the templates etc? \n", "created_at": "2016-07-11T10:09:51Z" }, { "body": "@mitchswaldman ping\n", "created_at": "2016-07-18T14:32:07Z" }, { "body": "Oh shoot my bad! Of course. 
\n\n1) We are running Elasticsearch in Docker so here are the relevant parts of the Dockerfile for installing elasticsearch.\n\n`Dockerfile:`\n\n# \n\nENV PATH /usr/share/elasticsearch/bin:$PATH\n\n```\nENV ES_JAVA_OPTS=\"$ES_JAVA_OPTS -Xmx1g -Xms1g\"\n\nRUN curl -O https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/$ELASTICSEARCH_VERSION/elasticsearch-$ELASTICSEARCH_VERSION.tar.gz\nRUN tar -zxvf elasticsearch-$ELASTICSEARCH_VERSION.tar.gz\n\nRUN useradd elasticsearch\n\nRUN mv -f elasticsearch-$ELASTICSEARCH_VERSION/ /usr/share/elasticsearch\n\nRUN set -ex \\\n && for path in \\\n /usr/share/elasticsearch/data \\\n /usr/share/elasticsearch/logs \\\n /usr/share/elasticsearch/config \\\n /usr/share/elasticsearch/config/scripts \\\n ; do \\\n mkdir -p \"$path\"; \\\n chown -R elasticsearch:elasticsearch \"$path\"; \\\n done\n\n\nCOPY config/logging.yml /usr/share/elasticsearch/config/\n\nVOLUME /usr/share/elasticsearch/data\n\nCOPY docker-entrypoint.sh /\nCOPY startup.sh /\nCOPY startup-wrapper.sh /\n\nENTRYPOINT [\"/docker-entrypoint.sh\"]\n\nEXPOSE 9200 9300\n\nCMD [\"/startup-wrapper.sh\"]\n\n\nADD plugins /usr/share/elasticsearch/plugins_source\n\n\nRUN /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-icu\nRUN /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-phonetic\nRUN /usr/share/elasticsearch/bin/elasticsearch-plugin install x-pack\nRUN /usr/share/elasticsearch/bin/elasticsearch-plugin install file:////usr/share/elasticsearch/plugins_source/fairhair-elasticsearch-queries-1.19.0_es20/fairhair-elasticsearch-queries-1.19.0-SNAPSHOT.zip\n```\n\n# \n\nThat last plugin is the custom plugin we wrote which registers `FairhairTokenizer` and the other Fairhair analyzers.\n\n2) On start up is when we post the templates to ES.\n\n`startup-wrapper.sh` (referenced in Dockerfile)\n\n# \n\n```\nset -m\n\nelasticsearch &\n\nwhile [ $(curl -s -o /dev/null -w \"%{http_code}\" localhost:9200) != 200 ]; do echo \"Waiting for elasticsearch to be ready before posting templates...\"; sleep 5; done\n\nfor template in $(ls -1 /usr/share/elasticsearch/config/templates); do\n echo -e \"\\nPosting template: $template\"\n curl -s -XPUT \"localhost:9200/_template/$template\" -d\"@/usr/share/elasticsearch/config/templates/$template\"\ndone\n\necho -e \"\\nAttaching to elasticsearch process\" \nfg %1\n```\n\n# \n\n3) Here is the template in question. I've removed many of the mappings since they are somewhat redundant. 
\n`document-template.json`\n\n# \n\n```\n{\n \"order\": 0,\n \"settings\": {\n \"index.analysis.analyzer.fairhairIndexAnalyzerv3.alias\": \"default\",\n \"index.analysis.analyzer.fairhairIndexAnalyzerv3.type\": \"fairhair-index-analyzer\",\n \"index.analysis.analyzer.fairhairTokenizingAnalyzer.type\": \"fairhair-tokenizing-analyzer\",\n \"index.analysis.analyzer.fairhairTokenizingAnalyzer.alias\": \"default_search\"\n },\n \"mappings\": {\n \"docnews\": {\n \"_source\": {\n \"enabled\": true\n },\n \"dynamic\": \"false\",\n \"_timestamp\": {\n \"enabled\": true\n },\n \"_all\": {\n \"enabled\": false\n },\n \"properties\": {\n \"body\": {\n \"dynamic\": false,\n \"properties\": {\n \"byLine\": {\n \"dynamic\": false,\n \"properties\": {\n \"text\": {\n \"type\": \"text\",\n \"store\": \"no\",\n \"index\": \"analyzed\",\n \"norms\": \"false\",\n \"index_options\": \"positions\",\n \"doc_values\": \"false\",\n \"analyzer\": \"fairhairIndexAnalyzerv3\"\n }\n }\n },\n \"content\": {\n \"dynamic\": false,\n \"properties\": {\n \"text\": {\n \"type\": \"text\",\n \"store\": \"no\",\n \"index\": \"analyzed\",\n \"norms\": \"false\",\n \"index_options\": \"positions\",\n \"doc_values\": \"false\",\n \"analyzer\": \"fairhairIndexAnalyzerv3\"\n }\n }\n },\n \"ingress\": {\n \"dynamic\": false,\n \"properties\": {\n \"text\": {\n \"type\": \"text\",\n \"store\": \"no\",\n \"index\": \"analyzed\",\n \"norms\": \"false\",\n \"index_options\": \"positions\",\n \"doc_values\": \"false\",\n \"analyzer\": \"fairhairIndexAnalyzerv3\"\n }\n }\n },\n \"publishDate\": {\n \"dynamic\": false,\n \"properties\": {\n \"date\": {\n \"type\": \"date\",\n \"doc_values\": \"true\"\n }\n }\n },\n \"title\": {\n \"dynamic\": false,\n \"properties\": {\n \"text\": {\n \"type\": \"text\",\n \"store\": \"no\",\n \"index\": \"analyzed\",\n \"norms\": \"false\",\n \"index_options\": \"positions\",\n \"doc_values\": \"false\",\n \"analyzer\": \"fairhairIndexAnalyzerv3\"\n }\n }\n },\n \"contentTags\": {\n \"type\": \"keyword\",\n \"index\": \"not_analyzed\",\n \"norms\": \"false\",\n \"index_options\": \"docs\",\n \"doc_values\": \"true\"\n },\n \"links\": {\n \"dynamic\": false,\n \"type\": \"nested\",\n \"properties\": {\n \"url\": {\n \"type\": \"keyword\",\n \"index\": \"not_analyzed\",\n \"norms\": \"false\",\n \"index_options\": \"docs\",\n \"doc_values\": \"true\"\n }\n }\n }\n }\n }\n },\n \"template\": \"document.*\"\n}\n```\n\n# \n\n4) The final step in creating the error is to simply POST some data to an index such as `document.document-20160716`. Upon index creation, ES will fail and throw the aforementioned error. \n\nLet me know if there's anything I need to clear up or add on. 
For instance, is there a part in the custom queries plugin you need to see?\n", "created_at": "2016-07-18T17:10:11Z" }, { "body": "Out of interest, why are you doing this:\n\n```\n\"settings\": {\n \"index.analysis.analyzer.fairhairIndexAnalyzerv3.alias\": \"default\",\n \"index.analysis.analyzer.fairhairIndexAnalyzerv3.type\": \"fairhair-index-analyzer\",\n \"index.analysis.analyzer.fairhairTokenizingAnalyzer.type\": \"fairhair-tokenizing-analyzer\",\n \"index.analysis.analyzer.fairhairTokenizingAnalyzer.alias\": \"default_search\"\n},\n```\n\nWhy not just do this:\n\n```\n\"settings\": {\n \"analysis\": {\n \"analyzer\": {\n \"default\": {\n \"type\": \"fairhair-index-analyzer\"\n },\n \"default_search\": {\n \"type\": \"fairhair-tokenizing-analyzer\"\n }\n }\n }\n}\n```\n", "created_at": "2016-07-19T12:47:15Z" }, { "body": "This template was given to me. So the settings part just came like that. \n", "created_at": "2016-07-19T16:03:11Z" }, { "body": "@mitchswaldman @clintongormley I opened a fix for this issue in #19506 @mitchswaldman would you be able to test this fix if it solves your issue?\n", "created_at": "2016-07-19T19:44:49Z" }, { "body": "@mitchswaldman on another note, I think you should move to the mapping @clintongormley suggested. The entire alias feature seems pretty esoteric and it has lots of caveats like the late binding it does can cause tricky problems. ie.\n\n```\n\"index.analysis.analyzer.foobar.alias\" : \"default\"\n\"index.analysis.analyzer.foobar.type\", \"german\"\n\"index.analysis.analyzer.foobar_search.alias\": \"default_search\"\n\"index.analysis.analyzer.foobar_search.type\": \"default\"\n```\n\nwon't work as you expect, it uses the `standard` analyzer as `default_search` but `german` for indexing which is, err not ideal :) - I think one way or the other we will remove this ability unless I understand it's real purpose at some point :)... \n", "created_at": "2016-07-19T19:47:47Z" } ], "number": 19163, "title": "ES 5 Doesn't allow registering default analyzer" }
{ "body": "In the lack of tests the analyzer.alias feature was pretty much not working\nat all on current master. Issues like #19163 showed some serious problems for users\nusing this feature upgrading to an alpha version.\nThis change fixes the processing order and allows aliases to be set for\nexisting analyzers like `default`. This change also ensures that if `default`\nis aliased the correct analyzer is used for `default_search` etc.\n\nCloses #19163\n", "number": 19506, "review_comments": [ { "body": "we should probably have the same tests in TextFieldMapperTests?\n", "created_at": "2016-07-20T06:26:01Z" }, { "body": "I'm curious if that was important for the fix or if it is just for consistency?\n", "created_at": "2016-07-20T06:27:01Z" }, { "body": "this is just consistency - I didnt' have to fix this part since it was a bug in the AnalysisSerivice after all\n", "created_at": "2016-07-20T07:14:16Z" }, { "body": "I can add those\n", "created_at": "2016-07-20T07:14:28Z" } ], "title": "Fix analyzer alias processing" }
{ "commits": [ { "message": "Fix analyzer alias processeing\n\nIn the lack of tests the analyzer.alias feature was pretty much not working\nat all on current master. Issues like #19163 showed some serious problems for users\nusing this feature upgrading to an alpha version.\nThis change fixes the processing order and allows aliases to be set for\nexisting analyzers like `default`. This change also ensures that if `default`\nis aliased the correct analyzer is used for `default_search` etc.\n\nCloses #19163" }, { "message": "added more tests" }, { "message": "Merge branch 'master' into fix_analyzer_alias" } ], "files": [ { "diff": "@@ -28,8 +28,11 @@\n import org.elasticsearch.index.mapper.core.TextFieldMapper;\n \n import java.io.Closeable;\n+import java.util.Arrays;\n import java.util.HashMap;\n+import java.util.HashSet;\n import java.util.Map;\n+import java.util.Set;\n \n import static java.util.Collections.unmodifiableMap;\n \n@@ -58,69 +61,34 @@ public AnalysisService(IndexSettings indexSettings,\n this.tokenFilters = unmodifiableMap(tokenFilterFactoryFactories);\n analyzerProviders = new HashMap<>(analyzerProviders);\n \n- if (!analyzerProviders.containsKey(\"default\")) {\n- analyzerProviders.put(\"default\", new StandardAnalyzerProvider(indexSettings, null, \"default\", Settings.Builder.EMPTY_SETTINGS));\n- }\n- if (!analyzerProviders.containsKey(\"default_search\")) {\n- analyzerProviders.put(\"default_search\", analyzerProviders.get(\"default\"));\n- }\n- if (!analyzerProviders.containsKey(\"default_search_quoted\")) {\n- analyzerProviders.put(\"default_search_quoted\", analyzerProviders.get(\"default_search\"));\n- }\n-\n+ Map<String, NamedAnalyzer> analyzerAliases = new HashMap<>();\n Map<String, NamedAnalyzer> analyzers = new HashMap<>();\n for (Map.Entry<String, AnalyzerProvider<?>> entry : analyzerProviders.entrySet()) {\n- AnalyzerProvider<?> analyzerFactory = entry.getValue();\n- String name = entry.getKey();\n- /*\n- * Lucene defaults positionIncrementGap to 0 in all analyzers but\n- * Elasticsearch defaults them to 0 only before version 2.0\n- * and 100 afterwards so we override the positionIncrementGap if it\n- * doesn't match here.\n- */\n- int overridePositionIncrementGap = TextFieldMapper.Defaults.POSITION_INCREMENT_GAP;\n- if (analyzerFactory instanceof CustomAnalyzerProvider) {\n- ((CustomAnalyzerProvider) analyzerFactory).build(this);\n- /*\n- * Custom analyzers already default to the correct, version\n- * dependent positionIncrementGap and the user is be able to\n- * configure the positionIncrementGap directly on the analyzer so\n- * we disable overriding the positionIncrementGap to preserve the\n- * user's setting.\n- */\n- overridePositionIncrementGap = Integer.MIN_VALUE;\n- }\n- Analyzer analyzerF = analyzerFactory.get();\n- if (analyzerF == null) {\n- throw new IllegalArgumentException(\"analyzer [\" + analyzerFactory.name() + \"] created null analyzer\");\n- }\n- NamedAnalyzer analyzer;\n- if (analyzerF instanceof NamedAnalyzer) {\n- // if we got a named analyzer back, use it...\n- analyzer = (NamedAnalyzer) analyzerF;\n- if (overridePositionIncrementGap >= 0 && analyzer.getPositionIncrementGap(analyzer.name()) != overridePositionIncrementGap) {\n- // unless the positionIncrementGap needs to be overridden\n- analyzer = new NamedAnalyzer(analyzer, overridePositionIncrementGap);\n- }\n+ processAnalyzerFactory(entry.getKey(), entry.getValue(), analyzerAliases, analyzers);\n+ }\n+ for (Map.Entry<String, NamedAnalyzer> entry : analyzerAliases.entrySet()) {\n+ 
String key = entry.getKey();\n+ if (analyzers.containsKey(key) &&\n+ (\"default\".equals(key) || \"default_search\".equals(key) || \"default_search_quoted\".equals(key)) == false) {\n+ throw new IllegalStateException(\"already registered analyzer with name: \" + key);\n } else {\n- analyzer = new NamedAnalyzer(name, analyzerFactory.scope(), analyzerF, overridePositionIncrementGap);\n- }\n- if (analyzers.containsKey(name)) {\n- throw new IllegalStateException(\"already registered analyzer with name: \" + name);\n- }\n- analyzers.put(name, analyzer);\n- String strAliases = this.indexSettings.getSettings().get(\"index.analysis.analyzer.\" + analyzerFactory.name() + \".alias\");\n- if (strAliases != null) {\n- for (String alias : Strings.commaDelimitedListToStringArray(strAliases)) {\n- analyzers.put(alias, analyzer);\n- }\n- }\n- String[] aliases = this.indexSettings.getSettings().getAsArray(\"index.analysis.analyzer.\" + analyzerFactory.name() + \".alias\");\n- for (String alias : aliases) {\n- analyzers.put(alias, analyzer);\n+ NamedAnalyzer configured = entry.getValue();\n+ analyzers.put(key, configured);\n }\n }\n \n+ if (!analyzers.containsKey(\"default\")) {\n+ processAnalyzerFactory(\"default\", new StandardAnalyzerProvider(indexSettings, null, \"default\", Settings.Builder.EMPTY_SETTINGS),\n+ analyzerAliases, analyzers);\n+ }\n+ if (!analyzers.containsKey(\"default_search\")) {\n+ analyzers.put(\"default_search\", analyzers.get(\"default\"));\n+ }\n+ if (!analyzers.containsKey(\"default_search_quoted\")) {\n+ analyzers.put(\"default_search_quoted\", analyzers.get(\"default_search\"));\n+ }\n+\n+\n NamedAnalyzer defaultAnalyzer = analyzers.get(\"default\");\n if (defaultAnalyzer == null) {\n throw new IllegalArgumentException(\"no default analyzer configured\");\n@@ -145,6 +113,58 @@ public AnalysisService(IndexSettings indexSettings,\n this.analyzers = unmodifiableMap(analyzers);\n }\n \n+ private void processAnalyzerFactory(String name, AnalyzerProvider<?> analyzerFactory, Map<String, NamedAnalyzer> analyzerAliases, Map<String, NamedAnalyzer> analyzers) {\n+ /*\n+ * Lucene defaults positionIncrementGap to 0 in all analyzers but\n+ * Elasticsearch defaults them to 0 only before version 2.0\n+ * and 100 afterwards so we override the positionIncrementGap if it\n+ * doesn't match here.\n+ */\n+ int overridePositionIncrementGap = TextFieldMapper.Defaults.POSITION_INCREMENT_GAP;\n+ if (analyzerFactory instanceof CustomAnalyzerProvider) {\n+ ((CustomAnalyzerProvider) analyzerFactory).build(this);\n+ /*\n+ * Custom analyzers already default to the correct, version\n+ * dependent positionIncrementGap and the user is be able to\n+ * configure the positionIncrementGap directly on the analyzer so\n+ * we disable overriding the positionIncrementGap to preserve the\n+ * user's setting.\n+ */\n+ overridePositionIncrementGap = Integer.MIN_VALUE;\n+ }\n+ Analyzer analyzerF = analyzerFactory.get();\n+ if (analyzerF == null) {\n+ throw new IllegalArgumentException(\"analyzer [\" + analyzerFactory.name() + \"] created null analyzer\");\n+ }\n+ NamedAnalyzer analyzer;\n+ if (analyzerF instanceof NamedAnalyzer) {\n+ // if we got a named analyzer back, use it...\n+ analyzer = (NamedAnalyzer) analyzerF;\n+ if (overridePositionIncrementGap >= 0 && analyzer.getPositionIncrementGap(analyzer.name()) != overridePositionIncrementGap) {\n+ // unless the positionIncrementGap needs to be overridden\n+ analyzer = new NamedAnalyzer(analyzer, overridePositionIncrementGap);\n+ }\n+ } else {\n+ analyzer = new 
NamedAnalyzer(name, analyzerFactory.scope(), analyzerF, overridePositionIncrementGap);\n+ }\n+ if (analyzers.containsKey(name)) {\n+ throw new IllegalStateException(\"already registered analyzer with name: \" + name);\n+ }\n+ analyzers.put(name, analyzer);\n+ String strAliases = this.indexSettings.getSettings().get(\"index.analysis.analyzer.\" + analyzerFactory.name() + \".alias\");\n+ Set<String> aliases = new HashSet<>();\n+ if (strAliases != null) {\n+ aliases.addAll(Strings.commaDelimitedListToSet(strAliases));\n+ }\n+ aliases.addAll(Arrays.asList(this.indexSettings.getSettings()\n+ .getAsArray(\"index.analysis.analyzer.\" + analyzerFactory.name() + \".alias\")));\n+ for (String alias : aliases) {\n+ if (analyzerAliases.putIfAbsent(alias, analyzer) != null) {\n+ throw new IllegalStateException(\"alias [\" + alias + \"] is already used by [\" + analyzerAliases.get(alias).name() + \"]\");\n+ }\n+ }\n+ }\n+\n @Override\n public void close() {\n for (NamedAnalyzer analyzer : analyzers.values()) {", "filename": "core/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java", "status": "modified" }, { "diff": "@@ -435,9 +435,9 @@ protected final void doXContentAnalyzers(XContentBuilder builder, boolean includ\n boolean hasDifferentSearchQuoteAnalyzer = fieldType().searchAnalyzer().name().equals(fieldType().searchQuoteAnalyzer().name()) == false;\n if (includeDefaults || hasDefaultIndexAnalyzer == false || hasDifferentSearchAnalyzer || hasDifferentSearchQuoteAnalyzer) {\n builder.field(\"analyzer\", fieldType().indexAnalyzer().name());\n- if (hasDifferentSearchAnalyzer || hasDifferentSearchQuoteAnalyzer) {\n+ if (includeDefaults || hasDifferentSearchAnalyzer || hasDifferentSearchQuoteAnalyzer) {\n builder.field(\"search_analyzer\", fieldType().searchAnalyzer().name());\n- if (hasDifferentSearchQuoteAnalyzer) {\n+ if (includeDefaults || hasDifferentSearchQuoteAnalyzer) {\n builder.field(\"search_quote_analyzer\", fieldType().searchQuoteAnalyzer().name());\n }\n }", "filename": "core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java", "status": "modified" }, { "diff": "@@ -29,6 +29,7 @@\n import org.apache.lucene.index.TermsEnum;\n import org.apache.lucene.util.BytesRef;\n import org.elasticsearch.common.compress.CompressedXContent;\n+import org.elasticsearch.common.xcontent.ToXContent;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.index.IndexService;\n@@ -44,6 +45,7 @@\n import org.junit.Before;\n \n import java.io.IOException;\n+import java.util.Collections;\n import java.util.Arrays;\n import java.util.HashMap;\n import java.util.Map;\n@@ -284,6 +286,46 @@ public void testSearchAnalyzerSerialization() throws IOException {\n \n mapper = parser.parse(\"type\", new CompressedXContent(mapping));\n assertEquals(mapping, mapper.mappingSource().toString());\n+\n+ mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\")\n+ .startObject(\"field\")\n+ .field(\"type\", \"text\")\n+ .field(\"analyzer\", \"keyword\")\n+ .endObject()\n+ .endObject().endObject().endObject().string();\n+\n+ mapper = parser.parse(\"type\", new CompressedXContent(mapping));\n+ assertEquals(mapping, mapper.mappingSource().toString());\n+\n+ // special case: default search analyzer\n+ mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\")\n+ .startObject(\"field\")\n+ .field(\"type\", \"text\")\n+ 
.field(\"analyzer\", \"keyword\")\n+ .field(\"search_analyzer\", \"default\")\n+ .endObject()\n+ .endObject().endObject().endObject().string();\n+\n+ mapper = parser.parse(\"type\", new CompressedXContent(mapping));\n+ assertEquals(mapping, mapper.mappingSource().toString());\n+\n+ mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\")\n+ .startObject(\"field\")\n+ .field(\"type\", \"text\")\n+ .field(\"analyzer\", \"keyword\")\n+ .endObject()\n+ .endObject().endObject().endObject().string();\n+ mapper = parser.parse(\"type\", new CompressedXContent(mapping));\n+ XContentBuilder builder = XContentFactory.jsonBuilder();\n+\n+ mapper.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(\"include_defaults\", \"true\")));\n+ String mappingString = builder.string();\n+ assertTrue(mappingString.contains(\"analyzer\"));\n+ assertTrue(mappingString.contains(\"search_analyzer\"));\n+ assertTrue(mappingString.contains(\"search_quote_analyzer\"));\n }\n \n public void testSearchQuoteAnalyzerSerialization() throws IOException {", "filename": "core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java", "status": "modified" }, { "diff": "@@ -53,6 +53,7 @@\n import java.io.IOException;\n import java.util.Arrays;\n import java.util.Collection;\n+import java.util.Collections;\n import java.util.Map;\n \n import static java.util.Collections.emptyMap;\n@@ -301,6 +302,48 @@ public void testSearchAnalyzerSerialization() throws IOException {\n \n mapper = parser.parse(\"type\", new CompressedXContent(mapping));\n assertEquals(mapping, mapper.mappingSource().toString());\n+\n+ // special case: default search analyzer\n+ mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\")\n+ .startObject(\"field\")\n+ .field(\"type\", \"string\")\n+ .field(\"analyzer\", \"keyword\")\n+ .endObject()\n+ .endObject().endObject().endObject().string();\n+\n+ mapper = parser.parse(\"type\", new CompressedXContent(mapping));\n+ assertEquals(mapping, mapper.mappingSource().toString());\n+\n+ // special case: default search analyzer\n+ mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\")\n+ .startObject(\"field\")\n+ .field(\"type\", \"string\")\n+ .field(\"analyzer\", \"keyword\")\n+ .field(\"search_analyzer\", \"default\")\n+ .endObject()\n+ .endObject().endObject().endObject().string();\n+\n+ mapper = parser.parse(\"type\", new CompressedXContent(mapping));\n+ assertEquals(mapping, mapper.mappingSource().toString());\n+\n+\n+ mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\")\n+ .startObject(\"field\")\n+ .field(\"type\", \"string\")\n+ .field(\"analyzer\", \"keyword\")\n+ .endObject()\n+ .endObject().endObject().endObject().string();\n+ mapper = parser.parse(\"type\", new CompressedXContent(mapping));\n+ XContentBuilder builder = XContentFactory.jsonBuilder();\n+\n+ mapper.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(\"include_defaults\", \"true\")));\n+ String mappingString = builder.string();\n+ assertTrue(mappingString.contains(\"analyzer\"));\n+ assertTrue(mappingString.contains(\"search_analyzer\"));\n+ assertTrue(mappingString.contains(\"search_quote_analyzer\"));\n }\n \n private Map<String, Object> getSerializedMap(String fieldName, DocumentMapper mapper) throws Exception {", "filename": 
"core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java", "status": "modified" }, { "diff": "@@ -25,6 +25,8 @@\n import org.apache.lucene.analysis.ar.ArabicNormalizationFilter;\n import org.apache.lucene.analysis.core.KeywordAnalyzer;\n import org.apache.lucene.analysis.core.WhitespaceTokenizer;\n+import org.apache.lucene.analysis.de.GermanAnalyzer;\n+import org.apache.lucene.analysis.en.EnglishAnalyzer;\n import org.apache.lucene.analysis.fa.PersianNormalizationFilter;\n import org.apache.lucene.analysis.hunspell.Dictionary;\n import org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilter;\n@@ -52,6 +54,7 @@\n import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;\n import org.elasticsearch.plugins.AnalysisPlugin;\n import org.elasticsearch.test.IndexSettingsModule;\n+import org.elasticsearch.test.VersionUtils;\n import org.hamcrest.MatcherAssert;\n \n import java.io.BufferedWriter;\n@@ -126,29 +129,59 @@ public void testAnalyzerAlias() throws IOException {\n Settings settings = Settings.builder()\n .put(\"index.analysis.analyzer.foobar.alias\",\"default\")\n .put(\"index.analysis.analyzer.foobar.type\", \"keyword\")\n+ .put(\"index.analysis.analyzer.foobar_search.alias\",\"default_search\")\n+ .put(\"index.analysis.analyzer.foobar_search.type\",\"english\")\n .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())\n- .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0)\n+ .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersion(random()))\n .build();\n AnalysisRegistry newRegistry = getNewRegistry(settings);\n AnalysisService as = getAnalysisService(newRegistry, settings);\n assertThat(as.analyzer(\"default\").analyzer(), is(instanceOf(KeywordAnalyzer.class)));\n+ assertThat(as.analyzer(\"default_search\").analyzer(), is(instanceOf(EnglishAnalyzer.class)));\n+ }\n \n+ public void testAnalyzerAliasReferencesAlias() throws IOException {\n+ Settings settings = Settings.builder()\n+ .put(\"index.analysis.analyzer.foobar.alias\",\"default\")\n+ .put(\"index.analysis.analyzer.foobar.type\", \"german\")\n+ .put(\"index.analysis.analyzer.foobar_search.alias\",\"default_search\")\n+ .put(\"index.analysis.analyzer.foobar_search.type\", \"default\")\n+ .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())\n+ .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersion(random()))\n+ .build();\n+ AnalysisRegistry newRegistry = getNewRegistry(settings);\n+ AnalysisService as = getAnalysisService(newRegistry, settings);\n+ assertThat(as.analyzer(\"default\").analyzer(), is(instanceOf(GermanAnalyzer.class)));\n+ // analyzer types are bound early before we resolve aliases\n+ assertThat(as.analyzer(\"default_search\").analyzer(), is(instanceOf(StandardAnalyzer.class)));\n }\n \n- public void testDoubleAlias() throws IOException {\n+ public void testAnalyzerAliasDefault() throws IOException {\n Settings settings = Settings.builder()\n .put(\"index.analysis.analyzer.foobar.alias\",\"default\")\n .put(\"index.analysis.analyzer.foobar.type\", \"keyword\")\n- .put(\"index.analysis.analyzer.barfoo.alias\",\"default\")\n- .put(\"index.analysis.analyzer.barfoo.type\",\"english\")\n .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())\n- .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0)\n+ .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersion(random()))\n .build();\n AnalysisRegistry newRegistry = getNewRegistry(settings);\n- String message = 
expectThrows(IllegalStateException.class, () -> getAnalysisService(newRegistry, settings)).getMessage();\n- assertEquals(\"already registered analyzer with name: default\", message);\n+ AnalysisService as = getAnalysisService(newRegistry, settings);\n+ assertThat(as.analyzer(\"default\").analyzer(), is(instanceOf(KeywordAnalyzer.class)));\n+ assertThat(as.analyzer(\"default_search\").analyzer(), is(instanceOf(KeywordAnalyzer.class)));\n }\n \n+ public void testAnalyzerAliasMoreThanOnce() throws IOException {\n+ Settings settings = Settings.builder()\n+ .put(\"index.analysis.analyzer.foobar.alias\",\"default\")\n+ .put(\"index.analysis.analyzer.foobar.type\", \"keyword\")\n+ .put(\"index.analysis.analyzer.foobar1.alias\",\"default\")\n+ .put(\"index.analysis.analyzer.foobar1.type\", \"english\")\n+ .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersion(random()))\n+ .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())\n+ .build();\n+ AnalysisRegistry newRegistry = getNewRegistry(settings);\n+ IllegalStateException ise = expectThrows(IllegalStateException.class, () -> getAnalysisService(newRegistry, settings));\n+ assertEquals(\"alias [default] is already used by [foobar]\", ise.getMessage());\n+ }\n public void testVersionedAnalyzers() throws Exception {\n String yaml = \"/org/elasticsearch/index/analysis/test1.yml\";\n Settings settings2 = Settings.builder()", "filename": "core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java", "status": "modified" }, { "diff": "@@ -32,12 +32,15 @@\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.index.mapper.MapperParsingException;\n+import org.elasticsearch.index.query.MatchQueryBuilder;\n import org.elasticsearch.index.query.QueryBuilders;\n+import org.elasticsearch.index.query.TermQueryBuilder;\n import org.elasticsearch.indices.IndexTemplateAlreadyExistsException;\n import org.elasticsearch.indices.InvalidAliasNameException;\n import org.elasticsearch.search.SearchHit;\n import org.elasticsearch.test.ESIntegTestCase;\n \n+import java.io.IOException;\n import java.util.ArrayList;\n import java.util.Arrays;\n import java.util.HashSet;", "filename": "core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java", "status": "modified" }, { "diff": "@@ -70,4 +70,51 @@\n settings:\n number_of_shards: 1\n number_of_replicas: 0\n+---\n+\"Put template with analyzer alias\":\n+\n+ - do:\n+ indices.put_template:\n+ name: test\n+ create: true\n+ order: 0\n+ body:\n+ template: test_*\n+ settings:\n+ index.analysis.analyzer.foobar.alias: \"default\"\n+ index.analysis.analyzer.foobar.type: \"keyword\"\n+ index.analysis.analyzer.foobar_search.alias: \"default_search\"\n+ index.analysis.analyzer.foobar_search.type: \"standard\"\n+\n+ - do:\n+ index:\n+ index: test_index\n+ type: test\n+ body: { field: \"the quick brown fox\" }\n+\n+ - do:\n+ indices.refresh:\n+ index: test_index\n+\n+ - do:\n+ search:\n+ index: test_index\n+ type: test\n+ body:\n+ query:\n+ term:\n+ field: \"the quick brown fox\"\n+\n+ - match: {hits.total: 1}\n+\n+ - do:\n+ search:\n+ index: test_index\n+ type: test\n+ body:\n+ query:\n+ match:\n+ field: \"the quick brown fox\"\n+\n+ - match: {hits.total: 0}\n ", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yaml", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: 2.3.1\n\n**JVM version**: 1.8.0_66\n\n**OS version**: CentOS 7\n\n**Description of the problem including expected versus actual behavior**:\nWhen a new index is auto-created (by indexing a document) using an index template, the setting `index.mapper.dynamic` is not honored: If the document has an unmapped type, it gets indexed nevertheless and the additional type is added to the index.\n\n_Expected result:_ The indexing operation should be rejected as dynamic mapping is disabled for the index created from the template.\n\n**Steps to reproduce**:\n\nCreate template:\n\n```\ncurl -XPUT localhost:9200/_template/test?pretty -d'\n{\n \"template\": \"test_*\",\n \"settings\": {\n \"number_of_shards\": 1,\n \"index.mapper.dynamic\": false\n },\n \"mappings\": {\n \"foo\": {\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n }\n }\n }\n }\n}'\n```\n\nIndex data that does not match the mapping:\n\n```\ncurl -XPOST localhost:9200/test_1/bar?pretty -d'\n{\n \"abc\": \"def\"\n}'\n```\n\nNow, an index with an additional type \"bar\" has been created from the template, although dynamic mapping is set to false.\n\n```\ncurl -XGET localhost:9200/test_1?pretty\n{\n \"test_1\" : {\n \"aliases\" : { },\n \"mappings\" : {\n \"foo\" : {\n \"properties\" : {\n \"name\" : {\n \"type\" : \"string\"\n }\n }\n },\n \"bar\" : {\n \"properties\" : {\n \"abc\" : {\n \"type\" : \"string\"\n }\n }\n }\n },\n \"settings\" : {\n \"index\" : {\n \"mapper\" : {\n \"dynamic\" : \"false\"\n },\n \"creation_date\" : \"1460026755596\",\n \"number_of_shards\" : \"1\",\n \"number_of_replicas\" : \"1\",\n \"uuid\" : \"EGWEI-iARECmY__EEa9jHg\",\n \"version\" : {\n \"created\" : \"2030199\"\n }\n }\n },\n \"warmers\" : { }\n }\n}\n```\n", "comments": [ { "body": "Related to https://github.com/elastic/elasticsearch/issues/17561\n\nThe difference is that, in this example, `index.mapper.dynamic` is set at the index level rather than in the config file in #17561.\n", "created_at": "2016-04-07T16:06:03Z" }, { "body": "Probably caused by https://github.com/elastic/elasticsearch/pull/15424\n", "created_at": "2016-04-07T16:34:48Z" }, { "body": "Delete previous comment - wrong issue\n", "created_at": "2016-05-05T08:03:11Z" }, { "body": "Turns out this has always worked this way, it isn't a regression but it is a bug and could be improved.\n", "created_at": "2016-05-05T08:08:30Z" } ], "number": 17592, "title": "index.mapper.dynamic is not honored during auto-creation of an index from template" }
{ "body": "Today they don't because the create index request that is implicitly created\nadds an empty mapping for the type of the document. So to Elasticsearch it\nlooks like this type was explicitly created and `index.mapper.dynamic` is not\nchecked.\n\nCloses #17592\n", "number": 19478, "review_comments": [], "title": "Automatically created indices should honor `index.mapper.dynamic`." }
{ "commits": [ { "message": "Automatically created indices should honor `index.mapper.dynamic`. #19478\n\nToday they don't because the create index request that is implicitly created\nadds an empty mapping for the type of the document. So to Elasticsearch it\nlooks like this type was explicitly created and `index.mapper.dynamic` is not\nchecked.\n\nCloses #17592" } ], "files": [ { "diff": "@@ -119,29 +119,21 @@ protected void doExecute(Task task, BulkRequest bulkRequest, ActionListener<Bulk\n \n if (needToCheck()) {\n // Keep track of all unique indices and all unique types per index for the create index requests:\n- final Map<String, Set<String>> indicesAndTypes = new HashMap<>();\n+ final Set<String> autoCreateIndices = new HashSet<>();\n for (ActionRequest request : bulkRequest.requests) {\n if (request instanceof DocumentRequest) {\n DocumentRequest req = (DocumentRequest) request;\n- Set<String> types = indicesAndTypes.get(req.index());\n- if (types == null) {\n- indicesAndTypes.put(req.index(), types = new HashSet<>());\n- }\n- types.add(req.type());\n+ autoCreateIndices.add(req.index());\n } else {\n throw new ElasticsearchException(\"Parsed unknown request in bulk actions: \" + request.getClass().getSimpleName());\n }\n }\n- final AtomicInteger counter = new AtomicInteger(indicesAndTypes.size());\n+ final AtomicInteger counter = new AtomicInteger(autoCreateIndices.size());\n ClusterState state = clusterService.state();\n- for (Map.Entry<String, Set<String>> entry : indicesAndTypes.entrySet()) {\n- final String index = entry.getKey();\n+ for (String index : autoCreateIndices) {\n if (shouldAutoCreate(index, state)) {\n CreateIndexRequest createIndexRequest = new CreateIndexRequest();\n createIndexRequest.index(index);\n- for (String type : entry.getValue()) {\n- createIndexRequest.mapping(type);\n- }\n createIndexRequest.cause(\"auto(bulk api)\");\n createIndexRequest.masterNodeTimeout(bulkRequest.timeout());\n createIndexAction.execute(createIndexRequest, new ActionListener<CreateIndexResponse>() {", "filename": "core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java", "status": "modified" }, { "diff": "@@ -91,7 +91,6 @@ protected void doExecute(Task task, final IndexRequest request, final ActionList\n if (autoCreateIndex.shouldAutoCreate(request.index(), state)) {\n CreateIndexRequest createIndexRequest = new CreateIndexRequest();\n createIndexRequest.index(request.index());\n- createIndexRequest.mapping(request.type());\n createIndexRequest.cause(\"auto(index api)\");\n createIndexRequest.masterNodeTimeout(request.timeout());\n createIndexAction.execute(task, createIndexRequest, new ActionListener<CreateIndexResponse>() {", "filename": "core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java", "status": "modified" }, { "diff": "@@ -538,7 +538,8 @@ public DocumentMapperForType documentMapperWithAutoCreate(String type) {\n return new DocumentMapperForType(mapper, null);\n }\n if (!dynamic) {\n- throw new TypeMissingException(index(), type, \"trying to auto create mapping, but dynamic mapping is disabled\");\n+ throw new TypeMissingException(index(),\n+ new IllegalStateException(\"trying to auto create mapping, but dynamic mapping is disabled\"), type);\n }\n mapper = parse(type, null, true);\n return new DocumentMapperForType(mapper, mapper.mapping());", "filename": "core/src/main/java/org/elasticsearch/index/mapper/MapperService.java", "status": "modified" }, { "diff": "@@ -33,7 +33,12 @@\n public class TypeMissingException extends 
ElasticsearchException {\n \n public TypeMissingException(Index index, String... types) {\n- super(\"type[\" + Arrays.toString(types) + \"] missing\");\n+ super(\"type\" + Arrays.toString(types) + \" missing\");\n+ setIndex(index);\n+ }\n+\n+ public TypeMissingException(Index index, Throwable cause, String... types) {\n+ super(\"type\" + Arrays.toString(types) + \" missing\", cause);\n setIndex(index);\n }\n ", "filename": "core/src/main/java/org/elasticsearch/indices/TypeMissingException.java", "status": "modified" } ] }
{ "body": "```\nPUT test \n{\n \"mappings\": {\n \"type\": {\n\n }\n }\n}\n\nGET test/_mapping/field/*\n```\n\nresults in:\n\n```\n...\n \"_parent#null\": {\n \"full_name\": \"_parent#null\",\n \"mapping\": {}\n }\n...\n```\n", "comments": [ { "body": "Pushed a test for that too since I had one already.\n", "created_at": "2016-07-12T13:50:48Z" }, { "body": "I was concerned that the ParentFieldMapper also adds a `_parent#null` doc values field in this case. Luckily this is not the case. The `_parent#null` field only exists in the mapping, but it isn't doing anything.\n", "created_at": "2016-07-18T08:31:20Z" }, { "body": "@martijnvg was the `_parent#null` field only visible with the get field mapping API, or also with the mappings API?\n", "created_at": "2016-07-18T12:34:54Z" }, { "body": "Only with the get-field-mapping API. Doesn't that mean that it is a real Lucene field?\n", "created_at": "2016-07-18T13:59:35Z" }, { "body": "No, it only means that this field can be looked up by name in the mappings.\n", "created_at": "2016-07-18T14:31:59Z" }, { "body": "@jpountz Yes, only the get-field-mapping api. Also I verified that no such field was actually added to the Lucene index.\n", "created_at": "2016-07-18T15:41:23Z" } ], "number": 19389, "title": "New type always adds a _parent#null field if no _parent mapping specified" }
{ "body": "Instead it should just be `_parent` field.\n\nAlso added more tests regarding the join doc values field being added.\n\nCloses #19389\n", "number": 19470, "review_comments": [], "title": " Make sure that no `_parent#null` gets introduces as default _parent mapping" }
{ "commits": [ { "message": "parent/child: Make sure that no `_parent#null` gets introduces as default _parent mapping.\n\nInstead it should just be `_parent` field.\n\nAlso added more tests regarding the join doc values field being added.\n\nCloses #19389" } ], "files": [ { "diff": "@@ -58,9 +58,6 @@\n \n import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeMapValue;\n \n-/**\n- *\n- */\n public class ParentFieldMapper extends MetadataFieldMapper {\n \n public static final String NAME = \"_parent\";\n@@ -98,7 +95,7 @@ public Builder type(String type) {\n }\n \n public Builder eagerGlobalOrdinals(boolean eagerGlobalOrdinals) {\n- ((ParentFieldType) fieldType()).setEagerGlobalOrdinals(eagerGlobalOrdinals);\n+ fieldType().setEagerGlobalOrdinals(eagerGlobalOrdinals);\n return builder;\n }\n \n@@ -143,8 +140,8 @@ public MetadataFieldMapper.Builder parse(String name, Map<String, Object> node,\n @Override\n public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) {\n KeywordFieldMapper parentJoinField = createParentJoinFieldMapper(typeName, new BuilderContext(indexSettings, new ContentPath(0)));\n- MappedFieldType childJoinFieldType = Defaults.FIELD_TYPE.clone();\n- childJoinFieldType.setName(joinField(null));\n+ MappedFieldType childJoinFieldType = new ParentFieldType(Defaults.FIELD_TYPE, typeName);\n+ childJoinFieldType.setName(ParentFieldMapper.NAME);\n return new ParentFieldMapper(parentJoinField, childJoinFieldType, null, indexSettings);\n }\n }", "filename": "core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java", "status": "modified" }, { "diff": "@@ -18,17 +18,22 @@\n */\n package org.elasticsearch.index.mapper.parent;\n \n+import org.apache.lucene.index.IndexableField;\n+import org.elasticsearch.common.bytes.BytesArray;\n import org.elasticsearch.common.compress.CompressedXContent;\n import org.elasticsearch.common.io.stream.NamedWriteableRegistry;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.index.Index;\n+import org.elasticsearch.index.IndexService;\n import org.elasticsearch.index.IndexSettings;\n import org.elasticsearch.index.analysis.AnalysisService;\n import org.elasticsearch.index.mapper.DocumentMapper;\n import org.elasticsearch.index.mapper.MapperParsingException;\n import org.elasticsearch.index.mapper.MapperService;\n+import org.elasticsearch.index.mapper.MapperService.MergeReason;\n+import org.elasticsearch.index.mapper.ParseContext;\n import org.elasticsearch.index.mapper.ParsedDocument;\n import org.elasticsearch.index.mapper.SourceToParse;\n import org.elasticsearch.index.similarity.SimilarityService;\n@@ -59,23 +64,42 @@ public void testParentSetInDocNotAllowed() throws Exception {\n }\n }\n \n- public void testParentSet() throws Exception {\n+ public void testJoinFieldSet() throws Exception {\n+ String parentMapping = XContentFactory.jsonBuilder().startObject().startObject(\"parent_type\")\n+ .endObject().endObject().string();\n+ String childMapping = XContentFactory.jsonBuilder().startObject().startObject(\"child_type\")\n+ .startObject(\"_parent\").field(\"type\", \"parent_type\").endObject()\n+ .endObject().endObject().string();\n+ IndexService indexService = createIndex(\"test\");\n+ indexService.mapperService().merge(\"parent_type\", new CompressedXContent(parentMapping), MergeReason.MAPPING_UPDATE, false);\n+ 
indexService.mapperService().merge(\"child_type\", new CompressedXContent(childMapping), MergeReason.MAPPING_UPDATE, false);\n+\n+ // Indexing parent doc:\n+ DocumentMapper parentDocMapper = indexService.mapperService().documentMapper(\"parent_type\");\n+ ParsedDocument doc = parentDocMapper.parse(SourceToParse.source(\"test\", \"parent_type\", \"1122\", new BytesArray(\"{}\")));\n+ assertEquals(1, getNumberOfFieldWithParentPrefix(doc.rootDoc()));\n+ assertEquals(\"1122\", doc.rootDoc().getBinaryValue(\"_parent#parent_type\").utf8ToString());\n+\n+ // Indexing child doc:\n+ DocumentMapper childDocMapper = indexService.mapperService().documentMapper(\"child_type\");\n+ doc = childDocMapper.parse(SourceToParse.source(\"test\", \"child_type\", \"1\", new BytesArray(\"{}\")).parent(\"1122\"));\n+\n+ assertEquals(1, getNumberOfFieldWithParentPrefix(doc.rootDoc()));\n+ assertEquals(\"1122\", doc.rootDoc().getBinaryValue(\"_parent#parent_type\").utf8ToString());\n+ }\n+\n+ public void testJoinFieldNotSet() throws Exception {\n String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n- .startObject(\"_parent\").field(\"type\", \"p_type\").endObject()\n .endObject().endObject().string();\n DocumentMapper docMapper = createIndex(\"test\").mapperService().documentMapperParser().parse(\"type\", new CompressedXContent(mapping));\n-\n ParsedDocument doc = docMapper.parse(SourceToParse.source(\"test\", \"type\", \"1\", XContentFactory.jsonBuilder()\n .startObject()\n .field(\"x_field\", \"x_value\")\n .endObject()\n- .bytes()).parent(\"1122\"));\n-\n- assertEquals(\"1122\", doc.rootDoc().getBinaryValue(\"_parent#p_type\").utf8ToString());\n+ .bytes()));\n+ assertEquals(0, getNumberOfFieldWithParentPrefix(doc.rootDoc()));\n }\n \n-\n- @AwaitsFix(bugUrl = \"https://github.com/elastic/elasticsearch/issues/19389\")\n public void testNoParentNullFieldCreatedIfNoParentSpecified() throws Exception {\n Index index = new Index(\"_index\", \"testUUID\");\n IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, Settings.EMPTY);\n@@ -88,8 +112,19 @@ public void testNoParentNullFieldCreatedIfNoParentSpecified() throws Exception {\n .startObject(\"properties\")\n .endObject()\n .endObject().endObject();\n- mapperService.merge(\"some_type\", new CompressedXContent(mappingSource.string()), MapperService.MergeReason.MAPPING_UPDATE, false);\n+ mapperService.merge(\"some_type\", new CompressedXContent(mappingSource.string()), MergeReason.MAPPING_UPDATE, false);\n Set<String> allFields = new HashSet<>(mapperService.simpleMatchToIndexNames(\"*\"));\n+ assertTrue(allFields.contains(\"_parent\"));\n assertFalse(allFields.contains(\"_parent#null\"));\n }\n+\n+ private static int getNumberOfFieldWithParentPrefix(ParseContext.Document doc) {\n+ int numFieldWithParentPrefix = 0;\n+ for (IndexableField field : doc) {\n+ if (field.name().startsWith(\"_parent\")) {\n+ numFieldWithParentPrefix++;\n+ }\n+ }\n+ return numFieldWithParentPrefix;\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/parent/ParentMappingTests.java", "status": "modified" } ] }
{ "body": "The RepositoryUpgradabilityIT test fails regularly because indices are deleted before trying to recreate the index for other purposes. We could add `ensureGreen` calls after the index deletions, but the underlying cause of failure is that the ongoing recoveries get cancelled and this causes shard level data to be deleted. However, the deletion of these files can happen _while_ other threads are holding onto these files for the purposes of writes, which trips assertions where the write operations expects the temp files to be there (but they have been deleted by the recovery cancellation).\n\nAn example stack trace:\n\n``````\nERROR 21.6s J0 | RepositoryUpgradabilityIT.testRepositoryWorksWithCrossVersions <<< FAILURES!\n > Throwable #1: com.carrotsearch.randomizedtesting.UncaughtExceptionError: Captured an uncaught exception in thread: Thread[id=11223, name=elasticsearch[node_td1][generic][T#6], state=RUNNABLE, group=TGRP-RepositoryUpgradabilityIT]\n > at __randomizedtesting.SeedInfo.seed([6C53C46436F84D7A:76F1740152D478D]:0)\n > Caused by: java.lang.AssertionError: expected: [recovery.1468504452288._0.cfe] in []\n > at __randomizedtesting.SeedInfo.seed([6C53C46436F84D7A]:0)\n > at org.elasticsearch.indices.recovery.RecoveryTarget.assertTempFileExists(RecoveryTarget.java:407)\n > at org.elasticsearch.indices.recovery.RecoveryTarget.writeFileChunk(RecoveryTarget.java:397)\n > at org.elasticsearch.indices.recovery.RecoveryTargetService$FileChunkTransportRequestHandler.messageReceived(RecoveryTargetService.java:417)\n > at org.elasticsearch.indices.recovery.RecoveryTargetService$FileChunkTransportRequestHandler.messageReceived(RecoveryTargetService.java:390)\n > at org.elasticsearch.transport.TransportRequestHandler.messageReceived(TransportRequestHandler.java:33)\n > at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:69)\n > at org.elasticsearch.transport.local.LocalTransport$1.doRun(LocalTransport.java:322)\n > at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:510)\n > at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\n > at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n > at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n > at java.lang.Thread.run(Thread.java:745)```\n``````\n", "comments": [ { "body": "Is this fixed by #19466?\n", "created_at": "2016-07-18T13:16:45Z" }, { "body": "Yes, I just created the issue for it to link to.\n", "created_at": "2016-07-18T13:22:18Z" } ], "number": 19473, "title": "Cancellation of recovery deletes files still held onto by writes" }
{ "body": "Today when we reset a recovery because of the source not being\nready or the shard is getting removed on the source (for whatever reason)\nwe wipe all temp files and reset the recovery without respecting any\nreference counting or locking etc. all streams are closed and files are\nwiped. Yet, this is problematic since we assert that some files are on disk\netc. when we finish writing a file. These assertions don't hold anymore if we\nconcurrently wipe the tmp files.\n\nThis change moves the logic out of RecoveryTarget into RecoveriesCollection which\nbasically clones the RecoveryTarget on reset instead which allows in-flight operations\nto finish gracefully. This means we now have a single path for cleanups in RecoveryTarget\nand can safely use assertions in the class since files won't be removed unless the recovery\nis either cancelled, failed or finished.\n\nCloses #19473\n", "number": 19466, "review_comments": [ { "body": "we need to remove this from onGoingRecoveries and only call cancel if it was found, otherwise we may leave a lingering recovery id, pointing at a cancelled recovery.\n", "created_at": "2016-07-18T10:09:21Z" }, { "body": "I'm a bit concerned with exposing this close() as public is - someone may think this is the way to finish / clean up (which you should call `cancel` or `markAsDone`). How about calling this `closeAndStartNew` and make it return the new RecoveryTarget ? this also means we can make the copy constructor private, which is a good thing?\n", "created_at": "2016-07-18T10:14:08Z" }, { "body": "hmm.. wondering if we should add a unique generation to these files (or sample the idGenerator again when we restart). Right now we will have collisions between temp names of the previous recovery target and the new one, but I think we want complete separation? \n", "created_at": "2016-07-18T10:15:43Z" }, { "body": "wondering if we should switch `LocalTransportAddress.buildUnique()` to `new LocalTransportAddress(id)`\n", "created_at": "2016-07-18T10:38:41Z" }, { "body": "why public? protected not good enough?\n", "created_at": "2016-07-18T10:39:26Z" }, { "body": "yay\n", "created_at": "2016-07-18T10:40:48Z" }, { "body": "I think we want createGroup(0)?\n", "created_at": "2016-07-18T10:48:01Z" }, { "body": "I spent some time thinking how we can fold this into the ReplicationGroup in a clean way. I couldn't come up with anything fantastic - the best way I had was to make the actually recovery code in ReplicationGroup be a method on it's own which these tests can override and only do the recovery collection part. not sure how much cleaner that will be.\n", "created_at": "2016-07-18T11:31:47Z" }, { "body": "I think we can just stick with it once we need it again, refactor as we go..\n", "created_at": "2016-07-18T14:27:27Z" }, { "body": "agreed\n", "created_at": "2016-07-18T14:28:50Z" }, { "body": "nit: now that we folded the close and performRecoveryRestart into `status.resetRecovery()` we don't need the success pattern anymore. 
This can be:\n\n```\nif (onGoingRecoveries.replace(id, status, resetRecovery) == false) {\n resetRecovery.cancel(\"replace failed\");\n throw new IllegalStateException(\"failed to replace recovery target\");\n}\n\n```\n", "created_at": "2016-07-18T19:26:39Z" }, { "body": "++\n", "created_at": "2016-07-18T19:27:12Z" }, { "body": ":)\n", "created_at": "2016-07-18T19:27:30Z" }, { "body": "copying from a comment on the commit- \n\nnit: now that we folded the close and performRecoveryRestart into `status.resetRecovery()` we don't need the success pattern anymore. This can be:\n\n```\nif (onGoingRecoveries.replace(id, status, resetRecovery) == false) {\n resetRecovery.cancel(\"replace failed\");\n throw new IllegalStateException(\"failed to replace recovery target\");\n}\n\n```\n", "created_at": "2016-07-18T20:27:42Z" } ], "title": "Move `reset recovery` into RecoveriesCollection" }
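The clone-on-reset idea in #19466 can be summarised independently of the Elasticsearch classes. Below is a compact, self-contained sketch in plain Java (stand-in types, not the real `RecoveriesCollection`/`RecoveryTarget` API): a reset never deletes files in place; it marks the old target as finished, releases its initial reference so cleanup runs only once all in-flight users are done, and atomically swaps a fresh copy with the same recovery id into the collection.

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

class RecoveryTargetSketch {
    final long recoveryId;
    private final AtomicInteger refCount = new AtomicInteger(1); // initial reference held by the collection
    private final AtomicBoolean finished = new AtomicBoolean();

    RecoveryTargetSketch(long recoveryId) {
        this.recoveryId = recoveryId;
    }

    void incRef() { refCount.incrementAndGet(); }

    void decRef() {
        if (refCount.decrementAndGet() == 0) {
            // Only here is it safe to close index outputs and delete temp files:
            // no in-flight file-chunk write can still be using them.
        }
    }

    // Clone-on-reset: retire this instance gracefully and hand back a fresh one.
    RecoveryTargetSketch resetRecovery() {
        RecoveryTargetSketch copy = new RecoveryTargetSketch(recoveryId);
        if (finished.compareAndSet(false, true)) {
            decRef(); // release the initial reference; cleanup happens once all users call decRef()
        }
        return copy;
    }

    void cancel(String reason) {
        if (finished.compareAndSet(false, true)) {
            decRef();
        }
    }
}

class RecoveriesCollectionSketch {
    private final ConcurrentMap<Long, RecoveryTargetSketch> onGoingRecoveries = new ConcurrentHashMap<>();

    void resetRecovery(long id) {
        RecoveryTargetSketch current = onGoingRecoveries.get(id); // sketch: assumes the recovery exists
        RecoveryTargetSketch replacement = current.resetRecovery();
        if (onGoingRecoveries.replace(id, current, replacement) == false) {
            replacement.cancel("replace failed");
            throw new IllegalStateException("failed to replace recovery target");
        }
    }
}
```

With this shape, the assertion in `writeFileChunk` can stay unconditional: temp files are removed only on the single cleanup path that runs when the reference count reaches zero, never out from under a thread that is still writing.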
{ "commits": [ { "message": "Move `reset recovery` into RecoveriesCollection\n\nToday when we reset a recovery because of the source not being\nready or the shard is getting removed on the source (for whatever reason)\nwe wipe all temp files and reset the recovery without respecting any\nreference counting or locking etc. all streams are closed and files are\nwiped. Yet, this is problematic since we assert that some files are on disk\netc. when we finish writing a file. These assertions don't hold anymore if we\nconcurrently wipe the tmp files.\n\nThis change moves the logic out of RecoveryTarget into RecoveriesCollection which\nbascially clones the RecoveryTarget on reset instead which allows in-flight operations\nto finish gracefully. This means we now have a single path for cleanups in RecoveryTarget\nand can safely use assertions in the class since files won't be removed unless the recovery\nis either cancled, failed or finished." }, { "message": "fix typo" }, { "message": "apply review comments" }, { "message": "fix tests" }, { "message": "last comments" } ], "files": [ { "diff": "@@ -1317,9 +1317,11 @@ public long verify() throws CorruptIndexException {\n }\n \n public void deleteQuiet(String... files) {\n+ ensureOpen();\n+ StoreDirectory directory = this.directory;\n for (String file : files) {\n try {\n- directory().deleteFile(file);\n+ directory.deleteFile(\"Store.deleteQuiet\", file);\n } catch (Exception ex) {\n // ignore :(\n }", "filename": "core/src/main/java/org/elasticsearch/index/store/Store.java", "status": "modified" }, { "diff": "@@ -30,6 +30,8 @@\n import org.elasticsearch.index.shard.ShardId;\n import org.elasticsearch.threadpool.ThreadPool;\n \n+import java.io.IOException;\n+import java.io.UncheckedIOException;\n import java.util.concurrent.ConcurrentMap;\n import java.util.concurrent.atomic.AtomicBoolean;\n \n@@ -68,6 +70,27 @@ public long startRecovery(IndexShard indexShard, DiscoveryNode sourceNode,\n return status.recoveryId();\n }\n \n+\n+ /**\n+ * Resets the recovery and performs a recovery restart on the currently recovering index shard\n+ *\n+ * @see IndexShard#performRecoveryRestart()\n+ */\n+ public void resetRecovery(long id, ShardId shardId) throws IOException {\n+ try (RecoveryRef ref = getRecoverySafe(id, shardId)) {\n+ // instead of adding complicated state to RecoveryTarget we just flip the\n+ // target instance when we reset a recovery, that way we have only one cleanup\n+ // path on the RecoveryTarget and are always within the bounds of ref-counting\n+ // which is important since we verify files are on disk etc. after we have written them etc.\n+ RecoveryTarget status = ref.status();\n+ RecoveryTarget resetRecovery = status.resetRecovery();\n+ if (onGoingRecoveries.replace(id, status, resetRecovery) == false) {\n+ resetRecovery.cancel(\"replace failed\");\n+ throw new IllegalStateException(\"failed to replace recovery target\");\n+ }\n+ }\n+ }\n+\n /**\n * gets the {@link RecoveryTarget } for a given id. The RecoveryStatus returned has it's ref count already incremented\n * to make sure it's safe to use. 
However, you must call {@link RecoveryTarget#decRef()} when you are done with it, typically", "filename": "core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java", "status": "modified" }, { "diff": "@@ -29,6 +29,7 @@\n import org.elasticsearch.ElasticsearchException;\n import org.elasticsearch.ExceptionsHelper;\n import org.elasticsearch.cluster.node.DiscoveryNode;\n+import org.elasticsearch.common.UUIDs;\n import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.logging.ESLogger;\n import org.elasticsearch.common.logging.Loggers;\n@@ -78,27 +79,36 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget\n private final AtomicBoolean finished = new AtomicBoolean();\n \n private final ConcurrentMap<String, IndexOutput> openIndexOutputs = ConcurrentCollections.newConcurrentMap();\n- private final CancellableThreads cancellableThreads = new CancellableThreads();\n+ private final CancellableThreads cancellableThreads;\n \n // last time this status was accessed\n private volatile long lastAccessTime = System.nanoTime();\n \n private final Map<String, String> tempFileNames = ConcurrentCollections.newConcurrentMap();\n \n+ private RecoveryTarget(RecoveryTarget copyFrom) { // copy constructor\n+ this(copyFrom.indexShard(), copyFrom.sourceNode(), copyFrom.listener, copyFrom.cancellableThreads, copyFrom.recoveryId());\n+ }\n+\n public RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, RecoveryTargetService.RecoveryListener listener) {\n+ this(indexShard, sourceNode, listener, new CancellableThreads(), idGenerator.incrementAndGet());\n+ }\n \n+ private RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, RecoveryTargetService.RecoveryListener listener,\n+ CancellableThreads cancellableThreads, long recoveryId) {\n super(\"recovery_status\");\n- this.recoveryId = idGenerator.incrementAndGet();\n+ this.cancellableThreads = cancellableThreads;\n+ this.recoveryId = recoveryId;\n this.listener = listener;\n this.logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId());\n this.indexShard = indexShard;\n this.sourceNode = sourceNode;\n this.shardId = indexShard.shardId();\n- this.tempFilePrefix = RECOVERY_PREFIX + indexShard.recoveryState().getTimer().startTime() + \".\";\n+ this.tempFilePrefix = RECOVERY_PREFIX + UUIDs.base64UUID() + \".\";\n this.store = indexShard.store();\n- indexShard.recoveryStats().incCurrentAsTarget();\n // make sure the store is not released until we are done.\n store.incRef();\n+ indexShard.recoveryStats().incCurrentAsTarget();\n }\n \n public long recoveryId() {\n@@ -151,6 +161,21 @@ public void renameAllTempFiles() throws IOException {\n store.renameTempFilesSafe(tempFileNames);\n }\n \n+ /**\n+ * Closes the current recovery target and returns a\n+ * clone to reset the ongoing recovery\n+ */\n+ RecoveryTarget resetRecovery() throws IOException {\n+ ensureRefCount();\n+ RecoveryTarget copy = new RecoveryTarget(this);\n+ if (finished.compareAndSet(false, true)) {\n+ // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now\n+ decRef();\n+ }\n+ indexShard.performRecoveryRestart();\n+ return copy;\n+ }\n+\n /**\n * cancel the recovery. 
calling this method will clean temporary files and release the store\n * unless this object is in use (in which case it will be cleaned once all ongoing users call\n@@ -243,42 +268,33 @@ public IndexOutput openAndPutIndexOutput(String fileName, StoreFileMetaData meta\n return indexOutput;\n }\n \n- public void resetRecovery() throws IOException {\n- cleanOpenFiles();\n- indexShard().performRecoveryRestart();\n- }\n-\n @Override\n protected void closeInternal() {\n try {\n- cleanOpenFiles();\n+ // clean open index outputs\n+ Iterator<Entry<String, IndexOutput>> iterator = openIndexOutputs.entrySet().iterator();\n+ while (iterator.hasNext()) {\n+ Map.Entry<String, IndexOutput> entry = iterator.next();\n+ logger.trace(\"closing IndexOutput file [{}]\", entry.getValue());\n+ try {\n+ entry.getValue().close();\n+ } catch (Exception e) {\n+ logger.debug(\"error while closing recovery output [{}]\", e, entry.getValue());\n+ }\n+ iterator.remove();\n+ }\n+ // trash temporary files\n+ for (String file : tempFileNames.keySet()) {\n+ logger.trace(\"cleaning temporary file [{}]\", file);\n+ store.deleteQuiet(file);\n+ }\n } finally {\n // free store. increment happens in constructor\n store.decRef();\n indexShard.recoveryStats().decCurrentAsTarget();\n }\n }\n \n- protected void cleanOpenFiles() {\n- // clean open index outputs\n- Iterator<Entry<String, IndexOutput>> iterator = openIndexOutputs.entrySet().iterator();\n- while (iterator.hasNext()) {\n- Map.Entry<String, IndexOutput> entry = iterator.next();\n- logger.trace(\"closing IndexOutput file [{}]\", entry.getValue());\n- try {\n- entry.getValue().close();\n- } catch (Exception e) {\n- logger.debug(\"error while closing recovery output [{}]\", e, entry.getValue());\n- }\n- iterator.remove();\n- }\n- // trash temporary files\n- for (String file : tempFileNames.keySet()) {\n- logger.trace(\"cleaning temporary file [{}]\", file);\n- store.deleteQuiet(file);\n- }\n- }\n-\n @Override\n public String toString() {\n return shardId + \" [\" + recoveryId + \"]\";\n@@ -394,23 +410,11 @@ public void writeFileChunk(StoreFileMetaData fileMetaData, long position, BytesR\n indexOutput.close();\n }\n final String temporaryFileName = getTempNameForFile(name);\n- assert assertTempFileExists(temporaryFileName);\n+ assert Arrays.asList(store.directory().listAll()).contains(temporaryFileName) :\n+ \"expected: [\" + temporaryFileName + \"] in \" + Arrays.toString(store.directory().listAll());\n store.directory().sync(Collections.singleton(temporaryFileName));\n IndexOutput remove = removeOpenIndexOutputs(name);\n assert remove == null || remove == indexOutput; // remove maybe null if we got finished\n }\n }\n-\n- private boolean assertTempFileExists(String temporaryFileName) throws IOException {\n- try {\n- assert Arrays.asList(store.directory().listAll()).contains(temporaryFileName) :\n- \"expected: [\" + temporaryFileName + \"] in \" + Arrays.toString(store.directory().listAll());\n- } catch (AssertionError error) {\n- if (finished.get() == false) {\n- // if we got canceled stuff might not be here anymore..\n- throw error;\n- }\n- }\n- return true;\n- }\n }", "filename": "core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java", "status": "modified" }, { "diff": "@@ -150,7 +150,7 @@ protected void retryRecovery(final RecoveryTarget recoveryTarget, final String r\n \n private void retryRecovery(final RecoveryTarget recoveryTarget, TimeValue retryAfter, final StartRecoveryRequest currentRequest) {\n try {\n- recoveryTarget.resetRecovery();\n+ 
onGoingRecoveries.resetRecovery(recoveryTarget.recoveryId(), recoveryTarget.shardId());\n } catch (Exception e) {\n onGoingRecoveries.failRecovery(recoveryTarget.recoveryId(), new RecoveryFailedException(currentRequest, e), true);\n }", "filename": "core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java", "status": "modified" }, { "diff": "@@ -29,6 +29,8 @@\n import org.elasticsearch.Version;\n import org.elasticsearch.action.ActionListener;\n import org.elasticsearch.action.admin.indices.flush.FlushRequest;\n+import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest;\n+import org.elasticsearch.action.admin.indices.stats.IndexShardStats;\n import org.elasticsearch.action.index.IndexRequest;\n import org.elasticsearch.action.index.IndexResponse;\n import org.elasticsearch.action.index.TransportIndexAction;\n@@ -61,6 +63,7 @@\n import org.elasticsearch.index.mapper.internal.UidFieldMapper;\n import org.elasticsearch.index.shard.IndexEventListener;\n import org.elasticsearch.index.shard.IndexShard;\n+import org.elasticsearch.index.shard.IndexShardState;\n import org.elasticsearch.index.shard.ShardId;\n import org.elasticsearch.index.shard.ShardPath;\n import org.elasticsearch.index.similarity.SimilarityService;\n@@ -106,7 +109,7 @@\n \n public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {\n \n- private ThreadPool threadPool;\n+ protected ThreadPool threadPool;\n private final Index index = new Index(\"test\", \"uuid\");\n private final ShardId shardId = new ShardId(index, 0);\n private final Map<String, String> indexMapping = Collections.singletonMap(\"type\", \"{ \\\"type\\\": {} }\");\n@@ -160,14 +163,15 @@ public void renameAllTempFiles() throws IOException {\n }\n }\n \n-\n- @Before\n- public void setup() {\n+ @Override\n+ public void setUp() throws Exception {\n+ super.setUp();\n threadPool = new TestThreadPool(getClass().getName());\n }\n \n- @After\n- public void destroy() {\n+ @Override\n+ public void tearDown() throws Exception {\n+ super.tearDown();\n ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);\n }\n \n@@ -197,8 +201,8 @@ protected ReplicationGroup createGroup(int replicas) throws IOException {\n return new ReplicationGroup(metaData, homePath);\n }\n \n- private DiscoveryNode getDiscoveryNode(String id) {\n- return new DiscoveryNode(id, id, LocalTransportAddress.buildUnique(), Collections.emptyMap(),\n+ protected DiscoveryNode getDiscoveryNode(String id) {\n+ return new DiscoveryNode(id, id, new LocalTransportAddress(id), Collections.emptyMap(),\n Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT);\n }\n \n@@ -227,7 +231,7 @@ private IndexShard newShard(boolean primary, DiscoveryNode node, IndexMetaData i\n }\n \n \n- class ReplicationGroup implements AutoCloseable, Iterable<IndexShard> {\n+ protected class ReplicationGroup implements AutoCloseable, Iterable<IndexShard> {\n private final IndexShard primary;\n private final List<IndexShard> replicas;\n private final IndexMetaData indexMetaData;\n@@ -279,15 +283,21 @@ public synchronized IndexShard addReplica() throws IOException {\n replicas.add(replica);\n return replica;\n }\n-\n public void recoverReplica(IndexShard replica, BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier)\n throws IOException {\n- final DiscoveryNode pNode;\n- synchronized (this) {\n- pNode = getDiscoveryNode(primary.routingEntry().currentNodeId());\n- }\n+ recoverReplica(replica, targetSupplier, true);\n+ }\n+\n+ public void recoverReplica(IndexShard 
replica, BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier,\n+ boolean markAsRecovering)\n+ throws IOException {\n+ final DiscoveryNode pNode = getPrimaryNode();\n final DiscoveryNode rNode = getDiscoveryNode(replica.routingEntry().currentNodeId());\n- replica.markAsRecovering(\"remote\", new RecoveryState(replica.shardId(), false, RecoveryState.Type.REPLICA, pNode, rNode));\n+ if (markAsRecovering) {\n+ replica.markAsRecovering(\"remote\", new RecoveryState(replica.shardId(), false, RecoveryState.Type.REPLICA, pNode, rNode));\n+ } else {\n+ assertEquals(replica.state(), IndexShardState.RECOVERING);\n+ }\n replica.prepareForIndexRecovery();\n RecoveryTarget recoveryTarget = targetSupplier.apply(replica, pNode);\n StartRecoveryRequest request = new StartRecoveryRequest(replica.shardId(), pNode, rNode,\n@@ -299,6 +309,10 @@ public void recoverReplica(IndexShard replica, BiFunction<IndexShard, DiscoveryN\n replica.updateRoutingEntry(ShardRoutingHelper.moveToStarted(replica.routingEntry()));\n }\n \n+ public synchronized DiscoveryNode getPrimaryNode() {\n+ return getDiscoveryNode(primary.routingEntry().currentNodeId());\n+ }\n+\n public Future<Void> asyncRecoverReplica(IndexShard replica, BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier)\n throws IOException {\n FutureTask<Void> task = new FutureTask<>(() -> {\n@@ -375,6 +389,10 @@ public synchronized void close() throws Exception {\n public Iterator<IndexShard> iterator() {\n return Iterators.<IndexShard>concat(replicas.iterator(), Collections.singleton(primary).iterator());\n }\n+\n+ public IndexShard getPrimary() {\n+ return primary;\n+ }\n }\n \n class IndexingOp extends ReplicationOperation<IndexRequest, IndexRequest, IndexingResult> {", "filename": "core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java", "status": "modified" }, { "diff": "@@ -36,8 +36,6 @@\n import static java.util.Collections.emptyMap;\n import static java.util.Collections.emptySet;\n \n-/**\n- */\n public class RecoveryStatusTests extends ESSingleNodeTestCase {\n \n public void testRenameTempFiles() throws IOException {\n@@ -73,7 +71,7 @@ public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, bo\n Set<String> strings = Sets.newHashSet(status.store().directory().listAll());\n String expectedFile = null;\n for (String file : strings) {\n- if (Pattern.compile(\"recovery[.]\\\\d+[.]foo[.]bar\").matcher(file).matches()) {\n+ if (Pattern.compile(\"recovery[.][\\\\w-]+[.]foo[.]bar\").matcher(file).matches()) {\n expectedFile = file;\n break;\n }", "filename": "core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java", "status": "modified" }, { "diff": "@@ -18,12 +18,15 @@\n */\n package org.elasticsearch.recovery;\n \n+import org.elasticsearch.ElasticsearchException;\n import org.elasticsearch.Version;\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.cluster.node.DiscoveryNode;\n+import org.elasticsearch.cluster.routing.TestShardRouting;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.transport.LocalTransportAddress;\n import org.elasticsearch.common.unit.TimeValue;\n+import org.elasticsearch.index.replication.ESIndexLevelReplicationTestCase;\n import org.elasticsearch.index.shard.IndexShard;\n import org.elasticsearch.index.shard.ShardId;\n import org.elasticsearch.indices.IndicesService;\n@@ -32,8 +35,12 @@\n import org.elasticsearch.indices.recovery.RecoveryState;\n import 
org.elasticsearch.indices.recovery.RecoveryTargetService;\n import org.elasticsearch.test.ESSingleNodeTestCase;\n+import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n+import org.junit.After;\n+import org.junit.Before;\n \n+import java.io.IOException;\n import java.util.concurrent.CountDownLatch;\n import java.util.concurrent.TimeUnit;\n import java.util.concurrent.atomic.AtomicBoolean;\n@@ -43,7 +50,7 @@\n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.lessThan;\n \n-public class RecoveriesCollectionTests extends ESSingleNodeTestCase {\n+public class RecoveriesCollectionTests extends ESIndexLevelReplicationTestCase {\n static final RecoveryTargetService.RecoveryListener listener = new RecoveryTargetService.RecoveryListener() {\n @Override\n public void onRecoveryDone(RecoveryState state) {\n@@ -57,83 +64,115 @@ public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, bo\n };\n \n public void testLastAccessTimeUpdate() throws Exception {\n- createIndex();\n- final RecoveriesCollection collection = new RecoveriesCollection(logger, getInstanceFromNode(ThreadPool.class));\n- final long recoveryId = startRecovery(collection);\n- try (RecoveriesCollection.RecoveryRef status = collection.getRecovery(recoveryId)) {\n- final long lastSeenTime = status.status().lastAccessTime();\n- assertBusy(new Runnable() {\n- @Override\n- public void run() {\n+ try (ReplicationGroup shards = createGroup(0)) {\n+ final RecoveriesCollection collection = new RecoveriesCollection(logger, threadPool);\n+ final long recoveryId = startRecovery(collection, shards.getPrimaryNode(), shards.addReplica());\n+ try (RecoveriesCollection.RecoveryRef status = collection.getRecovery(recoveryId)) {\n+ final long lastSeenTime = status.status().lastAccessTime();\n+ assertBusy(() -> {\n try (RecoveriesCollection.RecoveryRef currentStatus = collection.getRecovery(recoveryId)) {\n assertThat(\"access time failed to update\", lastSeenTime, lessThan(currentStatus.status().lastAccessTime()));\n }\n- }\n- });\n- } finally {\n- collection.cancelRecovery(recoveryId, \"life\");\n+ });\n+ } finally {\n+ collection.cancelRecovery(recoveryId, \"life\");\n+ }\n }\n }\n \n- public void testRecoveryTimeout() throws InterruptedException {\n- createIndex();\n- final RecoveriesCollection collection = new RecoveriesCollection(logger, getInstanceFromNode(ThreadPool.class));\n- final AtomicBoolean failed = new AtomicBoolean();\n- final CountDownLatch latch = new CountDownLatch(1);\n- final long recoveryId = startRecovery(collection, new RecoveryTargetService.RecoveryListener() {\n- @Override\n- public void onRecoveryDone(RecoveryState state) {\n- latch.countDown();\n- }\n+ public void testRecoveryTimeout() throws Exception {\n+ try (ReplicationGroup shards = createGroup(0)) {\n+ final RecoveriesCollection collection = new RecoveriesCollection(logger, threadPool);\n+ final AtomicBoolean failed = new AtomicBoolean();\n+ final CountDownLatch latch = new CountDownLatch(1);\n+ final long recoveryId = startRecovery(collection, shards.getPrimaryNode(), shards.addReplica(),\n+ new RecoveryTargetService.RecoveryListener() {\n+ @Override\n+ public void onRecoveryDone(RecoveryState state) {\n+ latch.countDown();\n+ }\n \n- @Override\n- public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) {\n- failed.set(true);\n- latch.countDown();\n+ @Override\n+ public void onRecoveryFailure(RecoveryState state, 
RecoveryFailedException e, boolean sendShardFailure) {\n+ failed.set(true);\n+ latch.countDown();\n+ }\n+ }, TimeValue.timeValueMillis(100));\n+ try {\n+ latch.await(30, TimeUnit.SECONDS);\n+ assertTrue(\"recovery failed to timeout\", failed.get());\n+ } finally {\n+ collection.cancelRecovery(recoveryId, \"meh\");\n }\n- }, TimeValue.timeValueMillis(100));\n- try {\n- latch.await(30, TimeUnit.SECONDS);\n- assertTrue(\"recovery failed to timeout\", failed.get());\n- } finally {\n- collection.cancelRecovery(recoveryId, \"meh\");\n }\n \n }\n \n public void testRecoveryCancellation() throws Exception {\n- createIndex();\n- final RecoveriesCollection collection = new RecoveriesCollection(logger, getInstanceFromNode(ThreadPool.class));\n- final long recoveryId = startRecovery(collection);\n- final long recoveryId2 = startRecovery(collection);\n- try (RecoveriesCollection.RecoveryRef recoveryRef = collection.getRecovery(recoveryId)) {\n- ShardId shardId = recoveryRef.status().shardId();\n- assertTrue(\"failed to cancel recoveries\", collection.cancelRecoveriesForShard(shardId, \"test\"));\n- assertThat(\"all recoveries should be cancelled\", collection.size(), equalTo(0));\n- } finally {\n- collection.cancelRecovery(recoveryId, \"meh\");\n- collection.cancelRecovery(recoveryId2, \"meh\");\n+ try (ReplicationGroup shards = createGroup(0)) {\n+ final RecoveriesCollection collection = new RecoveriesCollection(logger, threadPool);\n+ final long recoveryId = startRecovery(collection, shards.getPrimaryNode(), shards.addReplica());\n+ final long recoveryId2 = startRecovery(collection, shards.getPrimaryNode(), shards.addReplica());\n+ try (RecoveriesCollection.RecoveryRef recoveryRef = collection.getRecovery(recoveryId)) {\n+ ShardId shardId = recoveryRef.status().shardId();\n+ assertTrue(\"failed to cancel recoveries\", collection.cancelRecoveriesForShard(shardId, \"test\"));\n+ assertThat(\"all recoveries should be cancelled\", collection.size(), equalTo(0));\n+ } finally {\n+ collection.cancelRecovery(recoveryId, \"meh\");\n+ collection.cancelRecovery(recoveryId2, \"meh\");\n+ }\n }\n }\n \n- protected void createIndex() {\n- createIndex(\"test\",\n- Settings.builder()\n- .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)\n- .build());\n- ensureGreen();\n+ public void testResetRecovery() throws Exception {\n+ try (ReplicationGroup shards = createGroup(0)) {\n+ shards.startAll();\n+ int numDocs = randomIntBetween(1, 15);\n+ shards.indexDocs(numDocs);\n+ final RecoveriesCollection collection = new RecoveriesCollection(logger, threadPool);\n+ IndexShard shard = shards.addReplica();\n+ final long recoveryId = startRecovery(collection, shards.getPrimaryNode(), shard);\n+ try (RecoveriesCollection.RecoveryRef recovery = collection.getRecovery(recoveryId)) {\n+ final int currentAsTarget = shard.recoveryStats().currentAsTarget();\n+ final int referencesToStore = recovery.status().store().refCount();\n+ String tempFileName = recovery.status().getTempNameForFile(\"foobar\");\n+ collection.resetRecovery(recoveryId, recovery.status().shardId());\n+ try (RecoveriesCollection.RecoveryRef resetRecovery = collection.getRecovery(recoveryId)) {\n+ assertNotSame(recovery.status(), resetRecovery);\n+ assertSame(recovery.status().CancellableThreads(), resetRecovery.status().CancellableThreads());\n+ assertSame(recovery.status().indexShard(), resetRecovery.status().indexShard());\n+ assertSame(recovery.status().store(), resetRecovery.status().store());\n+ 
assertEquals(referencesToStore + 1, resetRecovery.status().store().refCount());\n+ assertEquals(currentAsTarget+1, shard.recoveryStats().currentAsTarget()); // we blink for a short moment...\n+ recovery.close();\n+ expectThrows(ElasticsearchException.class, () -> recovery.status().store());\n+ assertEquals(referencesToStore, resetRecovery.status().store().refCount());\n+ String resetTempFileName = resetRecovery.status().getTempNameForFile(\"foobar\");\n+ assertNotEquals(tempFileName, resetTempFileName);\n+ }\n+ assertEquals(currentAsTarget, shard.recoveryStats().currentAsTarget());\n+ }\n+ try (RecoveriesCollection.RecoveryRef resetRecovery = collection.getRecovery(recoveryId)) {\n+ shards.recoverReplica(shard, (s, n) -> {\n+ assertSame(s, resetRecovery.status().indexShard());\n+ return resetRecovery.status();\n+ }, false);\n+ }\n+ shards.assertAllEqual(numDocs);\n+ assertNull(\"recovery is done\", collection.getRecovery(recoveryId));\n+ }\n }\n \n-\n- long startRecovery(RecoveriesCollection collection) {\n- return startRecovery(collection, listener, TimeValue.timeValueMinutes(60));\n+ long startRecovery(RecoveriesCollection collection, DiscoveryNode sourceNode, IndexShard shard) {\n+ return startRecovery(collection,sourceNode, shard, listener, TimeValue.timeValueMinutes(60));\n }\n \n- long startRecovery(RecoveriesCollection collection, RecoveryTargetService.RecoveryListener listener, TimeValue timeValue) {\n- IndicesService indexServices = getInstanceFromNode(IndicesService.class);\n- IndexShard indexShard = indexServices.indexServiceSafe(resolveIndex(\"test\")).getShardOrNull(0);\n- final DiscoveryNode sourceNode = new DiscoveryNode(\"id\", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(),\n- Version.CURRENT);\n+ long startRecovery(RecoveriesCollection collection, DiscoveryNode sourceNode, IndexShard indexShard,\n+ RecoveryTargetService.RecoveryListener listener, TimeValue timeValue) {\n+ final DiscoveryNode rNode = getDiscoveryNode(indexShard.routingEntry().currentNodeId());\n+ indexShard.markAsRecovering(\"remote\", new RecoveryState(indexShard.shardId(), false, RecoveryState.Type.REPLICA, sourceNode,\n+ rNode));\n+ indexShard.prepareForIndexRecovery();\n return collection.startRecovery(indexShard, sourceNode, listener, timeValue);\n }\n }", "filename": "core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java", "status": "modified" } ] }
{ "body": "The REST test for the shrink API fails:\n\n```\nSuite: org.elasticsearch.backwards.MultiNodeBackwardsIT\n 1> [2016-07-13 08:28:05,799][INFO ][org.elasticsearch.test.rest.client] REST client initialized [http://[::1]:33103], elasticsearch version: [5.0.0-alpha5]\n 1> [2016-07-13 08:28:05,807][INFO ][org.elasticsearch.test.rest.client] REST client initialized [http://[::1]:33103], elasticsearch version: [5.0.0-alpha5]\n 1> [2016-07-13 08:28:06,346][INFO ][org.elasticsearch.backwards] Stash dump on failure [{\n 1> \"stash\" : {\n 1> \"body\" : {\n 1> \"cluster_name\" : \"qa_backwards-5.0_integTest\",\n 1> \"status\" : \"green\",\n 1> \"timed_out\" : false,\n 1> \"number_of_nodes\" : 2,\n 1> \"number_of_data_nodes\" : 2,\n 1> \"active_primary_shards\" : 5,\n 1> \"active_shards\" : 5,\n 1> \"relocating_shards\" : 0,\n 1> \"initializing_shards\" : 0,\n 1> \"unassigned_shards\" : 0,\n 1> \"delayed_unassigned_shards\" : 0,\n 1> \"number_of_pending_tasks\" : 0,\n 1> \"number_of_in_flight_fetch\" : 0,\n 1> \"task_max_waiting_in_queue_millis\" : 0,\n 1> \"active_shards_percent_as_number\" : 100.0\n 1> },\n 1> \"master\" : \"sBM60RLuTiuBlZutXBWnGg\"\n 1> }\n 1> }]\n 2> REPRODUCE WITH: gradle :qa:backwards-5.0:integTest -Dtests.seed=529E08C5D51BBDF9 -Dtests.class=org.elasticsearch.backwards.MultiNodeBackwardsIT -Dtests.method=\"test {p0=indices.shrink/10_basic/Shrink index via API}\" -Dtests.security.manager=true -Dtests.locale=es-MX -Dtests.timezone=Indian/Chagos\nFAILURE 0.75s | MultiNodeBackwardsIT.test {p0=indices.shrink/10_basic/Shrink index via API} <<< FAILURES!\n > Throwable #1: java.lang.AssertionError: expected [2xx] status code but api [indices.shrink] returned [500 Internal Server Error] [{\"error\":{\"root_cause\":[{\"type\":\"illegal_state_exception\",\"reason\":\"index source must have all shards allocated on the same node to shrink index\"}],\"type\":\"illegal_state_exception\",\"reason\":\"index source must have all shards allocated on the same node to shrink index\"},\"status\":500}]\n > at __randomizedtesting.SeedInfo.seed([529E08C5D51BBDF9:DACA371F7BE7D001]:0)\n > at org.elasticsearch.test.rest.section.DoSection.execute(DoSection.java:108)\n > at org.elasticsearch.test.rest.ESRestTestCase.test(ESRestTestCase.java:399)\n > at java.lang.Thread.run(Thread.java:745)\n 2> NOTE: leaving temporary files on disk at: /home/hinmanm/es/elasticsearch/qa/backwards-5.0/build/testrun/integTest/J0/temp/org.elasticsearch.backwards.MultiNodeBackwardsIT_529E08C5D51BBDF9-004\n 2> NOTE: test params are: codec=Asserting(Lucene60): {}, docValues:{}, maxPointsInLeafNode=806, maxMBSortInHeap=6.315786973231347, sim=RandomSimilarity(queryNorm=true,coord=crazy): {}, locale=es-MX, timezone=Indian/Chagos\n 2> NOTE: Linux 4.6.3-300.fc24.x86_64 amd64/Oracle Corporation 1.8.0_92 (64-bit)/cpus=8,threads=1,free=417462824,total=514850816\n 2> NOTE: All tests run in this JVM: [MultiNodeBackwardsIT]\nCompleted [1/1] in 0.99s, 1 test, 1 failure <<< FAILURES!\n```\n\nReproduction line (reproduces every time for me):\n\n```\ngradle :qa:backwards-5.0:integTest -Dtests.seed=529E08C5D51BBDF9 -Dtests.class=org.elasticsearch.backwards.MultiNodeBackwardsIT -Dtests.method=\"test {p0=indices.shrink/10_basic/Shrink index via API}\" -Dtests.security.manager=true -Dtests.locale=es-MX -Dtests.timezone=Indian/Chagos\n```\n\nI'm guessing this is because the test assumes that there will only be a single node, but there are two present for the backwards compatibility tests.\n", "comments": [ { "body": "@dakrone where did you 
see this? Are there any node logs etc.? This has nothing to do with 2 nodes; it should work, as it waits for the relocation\n", "created_at": "2016-07-14T09:05:14Z" }, { "body": "I think I know what the problem is: the test doesn't wait for all shards to be active before it uses allocation filtering, so the settings call might return successfully, but the wait call that happens next returns immediately since the next reroute is not fully applied and the shard has not yet been relocated. Allocation filtering (i.e. canRemain) is only called on started shards, which might not be the case... \n", "created_at": "2016-07-14T10:21:09Z" }, { "body": "@s1monw I was seeing this running tests on master on my desktop. It previously reproduced every time but it's fixed now, thanks for fixing!\n", "created_at": "2016-07-14T14:41:28Z" } ], "number": 19419, "title": "[CI] org.elasticsearch.backwards.MultiNodeBackwardsIT.test {p0=indices.shrink/10_basic/Shrink index via API} fails when multiple nodes are present" }
{ "body": "The Java API supports this while mostly used for tests it can also be useful in\nproduction environments. For instance if something is automated like a settings change\nand we execute some health right after it the settings update might have some consequences\nlike a reroute which hasn't been fully applied since the preconditions are not fulfilled yet.\nFor instance if not all shards started the settings update is applied but the reroute won't move\ncurrently initializing shards like in the shrink API test. Sure this could be done by waiting for\ngreen before but if the cluster moves shards due to some side-effects waiting for all events is\nstill useful. I also took the chance to add unittests to Priority.java\n\nCloses #19419\n", "number": 19432, "review_comments": [ { "body": "missing `normal` from the list\n", "created_at": "2016-07-14T10:29:17Z" } ], "title": "Add support for `wait_for_events` to the `_cluster/health` REST endpoint" }
{ "commits": [ { "message": "Add support for `wait_for_events` to the `_cluster/healt` REST endpoint\n\nThe Java API supports this while mostly used for tests it can also be useful in\nproduction environments. For instance if something is automated like a settings change\nand we execute some health right after it the settings update might have some consequences\nlike a reroute which hasn't been fully applied since the preconditions are not fulfilled yet.\nFor instance if not all shards started the settings update is applied but the reroute won't move\ncurrently initializing shards like in the shrink API test. Sure this could be done by waiting for\ngreen before but if the cluster moves shards due to some side-effects waiting for all events is\nstill useful. I also took the chance to add unittests to Priority.java\n\nCloses #19419" }, { "message": "add missing priority" } ], "files": [ { "diff": "@@ -22,6 +22,10 @@\n import org.elasticsearch.common.io.stream.StreamOutput;\n \n import java.io.IOException;\n+import java.util.Arrays;\n+import java.util.Collection;\n+import java.util.List;\n+import java.util.Locale;\n \n /**\n *\n@@ -56,7 +60,7 @@ public static Priority fromByte(byte b) {\n public static final Priority NORMAL = new Priority((byte) 2);\n public static final Priority LOW = new Priority((byte) 3);\n public static final Priority LANGUID = new Priority((byte) 4);\n- private static final Priority[] values = new Priority[] { IMMEDIATE, URGENT, HIGH, NORMAL, LOW, LANGUID };\n+ private static final List<Priority> values = Arrays.asList(IMMEDIATE, URGENT, HIGH, NORMAL, LOW, LANGUID);\n \n private final byte value;\n \n@@ -65,9 +69,9 @@ private Priority(byte value) {\n }\n \n /**\n- * @return an array of all available priorities, sorted from the highest to the lowest.\n+ * @return a list of all available priorities, sorted from the highest to the lowest.\n */\n- public static Priority[] values() {\n+ public static List<Priority> values() {\n return values;\n }\n \n@@ -113,4 +117,23 @@ public String toString() {\n return \"LANGUID\";\n }\n }\n+\n+ public static Priority valueOf(String value) {\n+ switch (value) {\n+ case \"IMMEDIATE\":\n+ return IMMEDIATE;\n+ case \"URGENT\":\n+ return URGENT;\n+ case \"HIGH\":\n+ return HIGH;\n+ case \"NORMAL\":\n+ return NORMAL;\n+ case \"LOW\":\n+ return LOW;\n+ case \"LANGUID\":\n+ return LANGUID;\n+ default:\n+ throw new IllegalArgumentException(\"no such priority: \" + value);\n+ }\n+ }\n }", "filename": "core/src/main/java/org/elasticsearch/common/Priority.java", "status": "modified" }, { "diff": "@@ -23,6 +23,7 @@\n import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;\n import org.elasticsearch.client.node.NodeClient;\n import org.elasticsearch.cluster.health.ClusterHealthStatus;\n+import org.elasticsearch.common.Priority;\n import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n@@ -62,6 +63,9 @@ public void handleRequest(final RestRequest request, final RestChannel channel,\n clusterHealthRequest.waitForRelocatingShards(request.paramAsInt(\"wait_for_relocating_shards\", clusterHealthRequest.waitForRelocatingShards()));\n clusterHealthRequest.waitForActiveShards(request.paramAsInt(\"wait_for_active_shards\", clusterHealthRequest.waitForActiveShards()));\n clusterHealthRequest.waitForNodes(request.param(\"wait_for_nodes\", clusterHealthRequest.waitForNodes()));\n+ if (request.param(\"wait_for_events\") != null) {\n+ 
clusterHealthRequest.waitForEvents(Priority.valueOf(request.param(\"wait_for_events\").toUpperCase(Locale.ROOT)));\n+ }\n client.admin().cluster().health(clusterHealthRequest, new RestStatusToXContentListener<ClusterHealthResponse>(channel));\n }\n ", "filename": "core/src/main/java/org/elasticsearch/rest/action/admin/cluster/health/RestClusterHealthAction.java", "status": "modified" }, { "diff": "@@ -612,13 +612,12 @@ public void testPrioritizedTasks() throws Exception {\n BlockingTask block = new BlockingTask(Priority.IMMEDIATE);\n clusterService.submitStateUpdateTask(\"test\", block);\n int taskCount = randomIntBetween(5, 20);\n- Priority[] priorities = Priority.values();\n \n // will hold all the tasks in the order in which they were executed\n List<PrioritizedTask> tasks = new ArrayList<>(taskCount);\n CountDownLatch latch = new CountDownLatch(taskCount);\n for (int i = 0; i < taskCount; i++) {\n- Priority priority = priorities[randomIntBetween(0, priorities.length - 1)];\n+ Priority priority = randomFrom(Priority.values());\n clusterService.submitStateUpdateTask(\"test\", new PrioritizedTask(priority, latch, tasks));\n }\n ", "filename": "core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java", "status": "modified" }, { "diff": "@@ -0,0 +1,98 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.elasticsearch.common;\n+\n+import org.elasticsearch.common.io.stream.BytesStreamOutput;\n+import org.elasticsearch.test.ESTestCase;\n+\n+import java.io.IOException;\n+import java.util.ArrayList;\n+import java.util.Arrays;\n+import java.util.Collections;\n+import java.util.List;\n+\n+public class PriorityTests extends ESTestCase {\n+\n+ public void testValueOf() {\n+ for (Priority p : Priority.values()) {\n+ assertSame(p, Priority.valueOf(p.toString()));\n+ }\n+\n+ IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> {\n+ Priority.valueOf(\"foobar\");\n+ });\n+ assertEquals(\"no such priority: foobar\", exception.getMessage());\n+ }\n+\n+ public void testToString() {\n+ assertEquals(\"IMMEDIATE\", Priority.IMMEDIATE.toString());\n+ assertEquals(\"HIGH\", Priority.HIGH.toString());\n+ assertEquals(\"LANGUID\", Priority.LANGUID.toString());\n+ assertEquals(\"LOW\", Priority.LOW.toString());\n+ assertEquals(\"URGENT\", Priority.URGENT.toString());\n+ assertEquals(\"NORMAL\", Priority.NORMAL.toString());\n+ assertEquals(6, Priority.values().size());\n+ }\n+\n+ public void testSerialization() throws IOException {\n+ for (Priority p : Priority.values()) {\n+ BytesStreamOutput out = new BytesStreamOutput();\n+ Priority.writeTo(p, out);\n+ Priority priority = Priority.readFrom(out.bytes().streamInput());\n+ assertSame(p, priority);\n+ }\n+ assertSame(Priority.IMMEDIATE, Priority.fromByte((byte) -1));\n+ assertSame(Priority.HIGH, Priority.fromByte((byte) 1));\n+ assertSame(Priority.LANGUID, Priority.fromByte((byte) 4));\n+ assertSame(Priority.LOW, Priority.fromByte((byte) 3));\n+ assertSame(Priority.NORMAL, Priority.fromByte((byte) 2));\n+ assertSame(Priority.URGENT,Priority.fromByte((byte) 0));\n+ assertEquals(6, Priority.values().size());\n+ }\n+\n+ public void testCompareTo() {\n+ assertTrue(Priority.IMMEDIATE.compareTo(Priority.URGENT) < 0);\n+ assertTrue(Priority.URGENT.compareTo(Priority.HIGH) < 0);\n+ assertTrue(Priority.HIGH.compareTo(Priority.NORMAL) < 0);\n+ assertTrue(Priority.NORMAL.compareTo(Priority.LOW) < 0);\n+ assertTrue(Priority.LOW.compareTo(Priority.LANGUID) < 0);\n+\n+ assertTrue(Priority.URGENT.compareTo(Priority.IMMEDIATE) > 0);\n+ assertTrue(Priority.HIGH.compareTo(Priority.URGENT) > 0);\n+ assertTrue(Priority.NORMAL.compareTo(Priority.HIGH) > 0);\n+ assertTrue(Priority.LOW.compareTo(Priority.NORMAL) > 0);\n+ assertTrue(Priority.LANGUID.compareTo(Priority.LOW) > 0);\n+\n+ for (Priority p : Priority.values()) {\n+ assertEquals(0, p.compareTo(p));\n+ }\n+ List<Priority> shuffeledAndSorted = new ArrayList<>(Priority.values());\n+ Collections.shuffle(shuffeledAndSorted, random());\n+ Collections.sort(shuffeledAndSorted);\n+ for (List<Priority> priorities : Arrays.asList(shuffeledAndSorted,\n+ Priority.values())) { // #values() guarantees order!\n+ assertSame(Priority.IMMEDIATE, priorities.get(0));\n+ assertSame(Priority.URGENT, priorities.get(1));\n+ assertSame(Priority.HIGH, priorities.get(2));\n+ assertSame(Priority.NORMAL, priorities.get(3));\n+ assertSame(Priority.LOW, priorities.get(4));\n+ assertSame(Priority.LANGUID, priorities.get(5));\n+ }\n+ }\n+}", "filename": "core/src/test/java/org/elasticsearch/common/PriorityTests.java", "status": "added" }, { "diff": "@@ -50,7 +50,7 @@ public class PrioritizedExecutorsTests extends ESTestCase {\n \n public void testPriorityQueue() throws Exception {\n 
PriorityBlockingQueue<Priority> queue = new PriorityBlockingQueue<>();\n- List<Priority> priorities = Arrays.asList(Priority.values());\n+ List<Priority> priorities = Priority.values();\n Collections.shuffle(priorities, random());\n \n for (Priority priority : priorities) {", "filename": "core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java", "status": "modified" }, { "diff": "@@ -38,6 +38,11 @@\n \"type\" : \"string\",\n \"description\" : \"Wait until the specified number of nodes is available\"\n },\n+ \"wait_for_events\": {\n+ \"type\" : \"enum\",\n+ \"options\" : [\"immediate\", \"urgent\", \"high\", \"normal\", \"low\", \"languid\"],\n+ \"description\" : \"Wait until all currently queued events with the given priorty are processed\"\n+ },\n \"wait_for_relocating_shards\": {\n \"type\" : \"number\",\n \"description\" : \"Wait until the specified number of relocating shards is finished\"", "filename": "rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json", "status": "modified" }, { "diff": "@@ -47,6 +47,7 @@\n wait_for_status: green\n index: source\n wait_for_relocating_shards: 0\n+ wait_for_events: \"languid\"\n \n # now we do the actual shrink\n - do:", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yaml", "status": "modified" } ] }
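For reference, the new parameter can be exercised directly over REST. The sketch below mirrors the shrink YAML test above (the `source` index name and the `languid` priority come from that test) and is only an illustration of the added option, not part of the change itself:

```
GET /_cluster/health/source?wait_for_status=green&wait_for_relocating_shards=0&wait_for_events=languid
```

With `wait_for_events=languid`, the request only returns once every pending cluster-state task, including the reroute triggered by a preceding settings update, has been processed.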
{ "body": "<!--\nGitHub is reserved for bug reports and feature requests. The best place\nto ask a general question is at the Elastic Discourse forums at\nhttps://discuss.elastic.co. If you are in fact posting a bug report or\na feature request, please include one and only one of the below blocks\nin your new issue.\n-->\n\n<!--\nIf you are filing a bug report, please remove the below feature\nrequest block and provide responses for all of the below items.\n-->\n\n**Elasticsearch version**: 2.1.2 / 2.3.3\n\n**JVM version**: Java 8u91\n\n**OS version**: Ubuntu 16.04\n\n**Description of the problem including expected versus actual behavior**:\nSpecifying a high value for `size=` when using scroll causes OOM.\n\nE.g: running `curl -XGET 'localhost:9200/*/_search?size=99999999'` results in:\n\n```\n{\"error\":{\"root_cause\":[{\"type\":\"query_phase_execution_exception\",\"reason\":\"Result window is too large, from + size must be less than or equal to: [10000] but was [99999999]. See the scroll api for a more efficient way to request large data sets. This limit can be set by changing the [index.max_result_window] index level parameter.\"}],\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"query\",\"grouped\":true,\"failed_shards\":[{\"shard\":0,\"index\":\".triggered_watches\",\"node\":\"qmVwvx_9RDqdn6QG8n8SKQ\",\"reason\":{\"type\":\"query_phase_execution_exception\",\"reason\":\"Result window is too large, from + size must be less than or equal to: [10000] but was [99999999]. See the scroll api for a more efficient way to request large data sets. This limit can be set by changing the [index.max_result_window] index level parameter.\"}}]},\"status\":500}\n```\n\nBut when running `curl -XGET 'localhost:9200/*/_search?scroll=8m&size=99999999`, the high value for `size=` is accepted, causing the node running OOM.\n\n**Steps to reproduce**:\n1. Start ES\n2. Run `curl -XGET 'localhost:9200/*/_search?scroll=8m&size=99999999`\n3. 
Node runs OOM\n\n**Provide logs (if relevant)**:\n\nLogs when calling `_search?scroll=8m&size=99999999`\n\n```\n[2016-07-04 13:34:08,734][INFO ][monitor.jvm ] [Doctor Glitternight] [gc][young][1107][34] duration [781ms], collections [1]/[1.4s], total [781ms]/[7.7s], memory [440.3mb]->[394.6mb]/[990.7mb], all_pools {[young] [172.7mb]->[1mb]/[266.2mb]}{[survivor] [33.2mb]->[33.2mb]/[33.2mb]}{[old] [234.3mb]->[360.3mb]/[691.2mb]}\n[2016-07-04 13:36:00,540][WARN ][rest.suppressed ] /*/_search Params: {pretty=, size=99999999, scroll=8m, index=*}\njava.lang.OutOfMemoryError: Java heap space\n at org.elasticsearch.cache.recycler.PageCacheRecycler$1.newInstance(PageCacheRecycler.java:102)\n at org.elasticsearch.cache.recycler.PageCacheRecycler$1.newInstance(PageCacheRecycler.java:99)\n at org.elasticsearch.common.recycler.DequeRecycler.obtain(DequeRecycler.java:53)\n at org.elasticsearch.common.recycler.AbstractRecycler.obtain(AbstractRecycler.java:33)\n at org.elasticsearch.common.recycler.DequeRecycler.obtain(DequeRecycler.java:28)\n at org.elasticsearch.common.recycler.FilterRecycler.obtain(FilterRecycler.java:39)\n at org.elasticsearch.common.recycler.Recyclers$3.obtain(Recyclers.java:119)\n at org.elasticsearch.common.recycler.FilterRecycler.obtain(FilterRecycler.java:39)\n at org.elasticsearch.cache.recycler.PageCacheRecycler.bytePage(PageCacheRecycler.java:150)\n at org.elasticsearch.common.util.AbstractBigArray.newBytePage(AbstractBigArray.java:108)\n at org.elasticsearch.common.util.BigByteArray.resize(BigByteArray.java:140)\n at org.elasticsearch.common.util.BigArrays.resizeInPlace(BigArrays.java:425)\n at org.elasticsearch.common.util.BigArrays.resize(BigArrays.java:472)\n at org.elasticsearch.common.util.BigArrays.grow(BigArrays.java:489)\n at org.elasticsearch.common.io.stream.BytesStreamOutput.ensureCapacity(BytesStreamOutput.java:160)\n at org.elasticsearch.common.io.stream.BytesStreamOutput.writeBytes(BytesStreamOutput.java:90)\n at org.elasticsearch.common.io.stream.StreamOutput.write(StreamOutput.java:299)\n at com.fasterxml.jackson.core.json.UTF8JsonGenerator._flushBuffer(UTF8JsonGenerator.java:2003)\n at com.fasterxml.jackson.core.json.UTF8JsonGenerator.writeRaw(UTF8JsonGenerator.java:597)\n at com.fasterxml.jackson.core.util.DefaultIndenter.writeIndentation(DefaultIndenter.java:94)\n at com.fasterxml.jackson.core.util.DefaultPrettyPrinter.writeObjectEntrySeparator(DefaultPrettyPrinter.java:307)\n at com.fasterxml.jackson.core.json.UTF8JsonGenerator._writePPFieldName(UTF8JsonGenerator.java:354)\n at com.fasterxml.jackson.core.json.UTF8JsonGenerator.writeFieldName(UTF8JsonGenerator.java:181)\n at com.fasterxml.jackson.core.JsonGenerator.copyCurrentStructure(JsonGenerator.java:1557)\n at com.fasterxml.jackson.core.JsonGenerator.copyCurrentStructure(JsonGenerator.java:1566)\n at org.elasticsearch.common.xcontent.json.JsonXContentGenerator.copyCurrentStructure(JsonXContentGenerator.java:425)\n at org.elasticsearch.common.xcontent.json.JsonXContentGenerator.copyRawValue(JsonXContentGenerator.java:410)\n at org.elasticsearch.common.xcontent.json.JsonXContentGenerator.writeRawField(JsonXContentGenerator.java:363)\n at org.elasticsearch.common.xcontent.XContentBuilder.rawField(XContentBuilder.java:914)\n at org.elasticsearch.common.xcontent.XContentHelper.writeRawField(XContentHelper.java:378)\n at org.elasticsearch.search.internal.InternalSearchHit.toXContent(InternalSearchHit.java:476)\n at 
org.elasticsearch.search.internal.InternalSearchHits.toXContent(InternalSearchHits.java:184)\n```\n\n**Describe the feature**:\n", "comments": [ { "body": "@nik9000 could you take a look at this please?\n", "created_at": "2016-07-06T13:44:01Z" }, { "body": "Sure. Same window size do you think?\n", "created_at": "2016-07-06T13:50:57Z" }, { "body": "I think so. Obviously the message will need to be different :D \n", "created_at": "2016-07-06T13:51:27Z" } ], "number": 19249, "title": "Scroll should limit size= value, as the search API already does." }
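As context for the fix that follows, the intended way to pull a large result set is to keep the per-request `size` small and page through it with the scroll API. The sketch below is only illustrative; the index pattern and the 8m scroll come from the report above, while the batch size of 1000, the `match_all` query, and the scroll id placeholder are assumptions:

```
GET /*/_search?scroll=8m
{
  "size": 1000,
  "query": { "match_all": {} }
}

GET /_search/scroll
{
  "scroll": "8m",
  "scroll_id": "<scroll_id from the previous response>"
}
```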
{ "body": "Limits the batch size from scrolling using the same setting as interactive\nsearch: `index.max_result_window`.\n\nCloses #19249\n", "number": 19367, "review_comments": [], "title": "Limit batch size when scrolling" }
{ "commits": [ { "message": "Limit batch size when scrolling\n\nLimits the batch size from scrolling using the same setting as interactive\nsearch: `index.max_result_window`.\n\nCloses #19249" }, { "message": "Fix scroll test\n\nIt was relying on unreasonably large windows crashing. Those large windows\nabort the request immediately now." } ], "files": [ { "diff": "@@ -192,19 +192,23 @@ public void preProcess() {\n if (hasOnlySuggest() ) {\n return;\n }\n- if (scrollContext == null) {\n- long from = from() == -1 ? 0 : from();\n- long size = size() == -1 ? 10 : size();\n- long resultWindow = from + size;\n- int maxResultWindow = indexService.getIndexSettings().getMaxResultWindow();\n+ long from = from() == -1 ? 0 : from();\n+ long size = size() == -1 ? 10 : size();\n+ long resultWindow = from + size;\n+ int maxResultWindow = indexService.getIndexSettings().getMaxResultWindow();\n \n- if (resultWindow > maxResultWindow) {\n+ if (resultWindow > maxResultWindow) {\n+ if (scrollContext == null) {\n throw new QueryPhaseExecutionException(this,\n \"Result window is too large, from + size must be less than or equal to: [\" + maxResultWindow + \"] but was [\"\n + resultWindow + \"]. See the scroll api for a more efficient way to request large data sets. \"\n + \"This limit can be set by changing the [\" + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey()\n + \"] index level setting.\");\n }\n+ throw new QueryPhaseExecutionException(this,\n+ \"Batch size is too large, size must be less than or equal to: [\" + maxResultWindow + \"] but was [\" + resultWindow\n+ + \"]. Scroll batch sizes cost as much memory as result windows so they are controlled by the [\"\n+ + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey() + \"] index level setting.\");\n }\n if (rescore != null) {\n int maxWindow = indexService.getIndexSettings().getMaxRescoreWindow();", "filename": "core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java", "status": "modified" }, { "diff": "@@ -35,6 +35,7 @@\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.common.xcontent.XContentParser;\n+import org.elasticsearch.index.IndexSettings;\n import org.elasticsearch.index.query.QueryBuilders;\n import org.elasticsearch.rest.RestStatus;\n import org.elasticsearch.rest.action.search.RestClearScrollAction;\n@@ -68,7 +69,7 @@\n import static org.hamcrest.Matchers.startsWith;\n \n /**\n- *\n+ * Tests for scrolling.\n */\n public class SearchScrollIT extends ESIntegTestCase {\n public void testSimpleScrollQueryThenFetch() throws Exception {\n@@ -419,11 +420,20 @@ public void testSimpleScrollQueryThenFetchClearAllScrollIds() throws Exception {\n assertThrows(internalCluster().transportClient().prepareSearchScroll(searchResponse2.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)), RestStatus.NOT_FOUND);\n }\n \n+ /**\n+ * Tests that we use an optimization shrinking the batch to the size of the shard. Thus the Integer.MAX_VALUE window doesn't OOM us.\n+ */\n public void testDeepScrollingDoesNotBlowUp() throws Exception {\n client().prepareIndex(\"index\", \"type\", \"1\")\n .setSource(\"field\", \"value\")\n .setRefreshPolicy(IMMEDIATE)\n .execute().get();\n+ /*\n+ * Disable the max result window setting for this test because it'll reject the search's unreasonable batch size. 
We want\n+ * unreasonable batch sizes to just OOM.\n+ */\n+ client().admin().indices().prepareUpdateSettings(\"index\")\n+ .setSettings(Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), Integer.MAX_VALUE)).get();\n \n for (SearchType searchType : SearchType.values()) {\n SearchRequestBuilder builder = client().prepareSearch(\"index\")", "filename": "core/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java", "status": "modified" }, { "diff": "@@ -10,13 +10,22 @@ setup:\n indices.refresh: {}\n \n ---\n-\"Request window limits\":\n+\"Request window limits without scroll\":\n - do:\n- catch: /Result window is too large, from \\+ size must be less than or equal to[:] \\[10000\\] but was \\[10010\\]/\n+ catch: /Result window is too large, from \\+ size must be less than or equal to[:] \\[10000\\] but was \\[10010\\]\\. See the scroll api for a more efficient way to request large data sets\\./\n search:\n index: test_1\n from: 10000\n \n+---\n+\"Request window limits with scroll\":\n+ - do:\n+ catch: /Batch size is too large, size must be less than or equal to[:] \\[10000\\] but was \\[10010\\]\\. Scroll batch sizes cost as much memory as result windows so they are controlled by the \\[index.max_result_window\\] index level setting\\./\n+ search:\n+ index: test_1\n+ scroll: 5m\n+ from: 10000\n+\n ---\n \"Rescore window limits\":\n - do:", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yaml", "status": "modified" } ] }
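If a scroll batch larger than the default limit of 10000 is genuinely needed after this change, the same escape hatch as for regular searches applies: raise `index.max_result_window` on the index, as the test above does via `IndexSettings.MAX_RESULT_WINDOW_SETTING`. A sketch over REST, where the index name and the value of 50000 are placeholders:

```
PUT /index/_settings
{
  "index.max_result_window": 50000
}
```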
{ "body": "**Elasticsearch version**: 5.0.0-alpha4\n\n**JVM version**: 1.8.0_60\n\n**Description of the problem including expected versus actual behavior**:\nThe has_child query does not return expected results when it contains a date based range query. I created a document type called **questions** which has a child document type called **answers**. The answers document has a **date** field. Once I insert sample data in elasticsearch, I can query the **answers** document successfully with a date range query. However, the has_child query does not return any results when the date range query utilizes any date value that precedes the answer dates. The following steps explain the issue further.\n\n**Steps to reproduce**:\n1) Create an index called \"sandbox\"\n\n```\nPUT /sandbox\n```\n\n2) Create a document mapping for \"answers\" with a parent type of \"questions\".\n\n```\nPUT /sandbox/_mapping/answers\n{\n \"_parent\": {\n \"type\": \"questions\" \n }\n , \"properties\": {\n \"answer\" : {\"type\": \"text\"},\n \"date\" : {\"type\": \"date\"}\n }\n}\n```\n\n3) Add a \"questions\" document\n\n```\nPUT /sandbox/questions/1\n{\n \"question\" : \"Why is the sky blue?\"\n}\n```\n\n4) Add a couple of \"answers\" document\n\n```\nPUT /sandbox/answers/1?parent=1\n{\n \"answer\" : \"Due to scattering of sunlight\",\n \"date\" : \"2016-05-01\"\n}\n```\n\n```\nPUT /sandbox/answers/2?parent=1\n{\n \"answer\" : \"Due to refraction of light\",\n \"date\" : \"2016-06-01\"\n}\n```\n\n5) Find all answers with a date value of gte \"2016-04-01\". \nTHIS WORKS AS EXPECTED. It returns the 2 answers documents as expected.\n\n```\nGET /sandbox/answers/_search\n{\n \"query\" : {\n \"range\": {\n \"date\": {\n \"gte\": \"2016-04-01\"\n }\n }\n }\n}\n```\n\n6) Now use the has_child query to get questions with answers that have date gte \"2016-04-01\". \n**DOES NOT WORK AS EXPECTED**. This returns 0 hits. I expected the 1 \"questions\" document in the response.\n\n```\nGET /sandbox/questions/_search\n{\n \"query\" : {\n \"has_child\": {\n \"type\": \"answers\",\n \"query\": {\n \"range\": {\n \"date\": {\n \"gte\": \"2016-04-01\"\n }\n }\n }\n }\n }\n}\n```\n", "comments": [ { "body": "The `has_child` query operates at the index level, you cannot restrict the type of your query like you're doing in your snippet: \n`GET /sandbox/questions/_search` \nTry with\n`/sandbox/_search`\n", "created_at": "2016-07-11T07:48:24Z" }, { "body": "> The has_child query operates at the index level, you cannot restrict the type of your query like you're doing in your snippet: \n\nThis is possible, since 2.0 if I recall correctly. \n\nThis really seems to be caused by a bug in: [HasChildQueryBuilder#rewrite(...)](https://github.com/elastic/elasticsearch/blob/8c40b2b54eac3e3ab3c41ece5c758be75173191b/core/src/main/java/org/elasticsearch/index/query/HasChildQueryBuilder.java#L477)\n\nFixing that makes the query work as expected. I'll open a PR.\n", "created_at": "2016-07-11T08:02:41Z" } ], "number": 19353, "title": "has_child query returns no results when used with date range query" }
{ "body": "If a nested, has_child or has_parent query's inner query gets rewritten then the InnerHitBuilder should use that rewritten form too, otherwise this can cause exceptions in a later stage.\n\nAlso fixes a bug that HasChildQueryBuilder's rewrite method overwrites max_children with min_children value, which causes the bug reported in #19353\n", "number": 19360, "review_comments": [ { "body": "doesn't `new InnerHitBuilder(original)` already copy the parentChildType and nestedPath?\n", "created_at": "2016-07-12T07:19:01Z" }, { "body": "No, only the properties nested and parent/child inner hits have in common.\n", "created_at": "2016-07-12T09:09:18Z" }, { "body": "ok\n", "created_at": "2016-07-12T09:14:51Z" } ], "title": "Ensure that that InnerHitBuilder uses rewritten queries" }
{ "commits": [ { "message": "inner_hits: Ensure that that InnerHitBuilder uses rewritten queries\n\nIf a nested, has_child or has_parent query's inner query gets rewritten then the InnerHitBuilder should use that rewritten form too, otherwise this can cause exceptions in a later phase.\n\nAlso fixes a bug that HasChildQueryBuilder's rewrite method overwrites max_children with min_children value.\n\nCloses #19353" } ], "files": [ { "diff": "@@ -472,9 +472,10 @@ protected int doHashCode() {\n \n @Override\n protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException {\n- QueryBuilder rewrite = query.rewrite(queryRewriteContext);\n- if (rewrite != query) {\n- return new HasChildQueryBuilder(type, rewrite, minChildren, minChildren, scoreMode, innerHitBuilder);\n+ QueryBuilder rewrittenQuery = query.rewrite(queryRewriteContext);\n+ if (rewrittenQuery != query) {\n+ InnerHitBuilder rewrittenInnerHit = InnerHitBuilder.rewrite(innerHitBuilder, rewrittenQuery);\n+ return new HasChildQueryBuilder(type, rewrittenQuery, minChildren, maxChildren, scoreMode, rewrittenInnerHit);\n }\n return this;\n }", "filename": "core/src/main/java/org/elasticsearch/index/query/HasChildQueryBuilder.java", "status": "modified" }, { "diff": "@@ -309,9 +309,10 @@ protected int doHashCode() {\n \n @Override\n protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException {\n- QueryBuilder rewrite = query.rewrite(queryShardContext);\n- if (rewrite != query) {\n- return new HasParentQueryBuilder(type, rewrite, score, innerHit);\n+ QueryBuilder rewrittenQuery = query.rewrite(queryShardContext);\n+ if (rewrittenQuery != query) {\n+ InnerHitBuilder rewrittenInnerHit = InnerHitBuilder.rewrite(innerHit, rewrittenQuery);\n+ return new HasParentQueryBuilder(type, rewrittenQuery, score, rewrittenInnerHit);\n }\n return this;\n }", "filename": "core/src/main/java/org/elasticsearch/index/query/HasParentQueryBuilder.java", "status": "modified" }, { "diff": "@@ -722,4 +722,16 @@ public static void extractInnerHits(QueryBuilder query, Map<String, InnerHitBuil\n }\n }\n \n+ static InnerHitBuilder rewrite(InnerHitBuilder original, QueryBuilder rewrittenQuery) {\n+ if (original == null) {\n+ return null;\n+ }\n+\n+ InnerHitBuilder copy = new InnerHitBuilder(original);\n+ copy.query = rewrittenQuery;\n+ copy.parentChildType = original.parentChildType;\n+ copy.nestedPath = original.nestedPath;\n+ return copy;\n+ }\n+\n }", "filename": "core/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java", "status": "modified" }, { "diff": "@@ -263,9 +263,10 @@ protected Query doToQuery(QueryShardContext context) throws IOException {\n \n @Override\n protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException {\n- QueryBuilder rewrite = query.rewrite(queryRewriteContext);\n- if (rewrite != query) {\n- return new NestedQueryBuilder(path, rewrite, scoreMode, innerHitBuilder);\n+ QueryBuilder rewrittenQuery = query.rewrite(queryRewriteContext);\n+ if (rewrittenQuery != query) {\n+ InnerHitBuilder rewrittenInnerHit = InnerHitBuilder.rewrite(innerHitBuilder, rewrittenQuery);\n+ return new NestedQueryBuilder(path, rewrittenQuery, scoreMode, rewrittenInnerHit);\n }\n return this;\n }", "filename": "core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java", "status": "modified" }, { "diff": "@@ -52,8 +52,6 @@\n import org.elasticsearch.search.sort.FieldSortBuilder;\n import org.elasticsearch.search.sort.SortOrder;\n import 
org.elasticsearch.test.AbstractQueryTestCase;\n-import org.junit.Before;\n-import org.junit.BeforeClass;\n \n import java.io.IOException;\n import java.util.Collections;\n@@ -74,6 +72,8 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase<HasChildQue\n \n private static String similarity;\n \n+ boolean requiresRewrite = false;\n+\n @Override\n protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {\n similarity = randomFrom(\"classic\", \"BM25\");\n@@ -105,8 +105,14 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws\n protected HasChildQueryBuilder doCreateTestQueryBuilder() {\n int min = randomIntBetween(0, Integer.MAX_VALUE / 2);\n int max = randomIntBetween(min, Integer.MAX_VALUE);\n- HasChildQueryBuilder hqb = new HasChildQueryBuilder(CHILD_TYPE,\n- RandomQueryBuilder.createQuery(random()),\n+\n+ QueryBuilder innerQueryBuilder = RandomQueryBuilder.createQuery(random());\n+ if (randomBoolean()) {\n+ requiresRewrite = true;\n+ innerQueryBuilder = new WrapperQueryBuilder(innerQueryBuilder.toString());\n+ }\n+\n+ HasChildQueryBuilder hqb = new HasChildQueryBuilder(CHILD_TYPE, innerQueryBuilder,\n RandomPicks.randomFrom(random(), ScoreMode.values()));\n hqb.minMaxChildren(min, max);\n if (randomBoolean()) {\n@@ -127,25 +133,24 @@ protected void doAssertLuceneQuery(HasChildQueryBuilder queryBuilder, Query quer\n assertEquals(queryBuilder.maxChildren(), lpq.getMaxChildren());\n assertEquals(queryBuilder.scoreMode(), lpq.getScoreMode()); // WTF is this why do we have two?\n if (queryBuilder.innerHit() != null) {\n+ // have to rewrite again because the provided queryBuilder hasn't been rewritten (directly returned from\n+ // doCreateTestQueryBuilder)\n+ queryBuilder = (HasChildQueryBuilder) queryBuilder.rewrite(context);\n SearchContext searchContext = SearchContext.current();\n assertNotNull(searchContext);\n- if (query != null) {\n- Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();\n- InnerHitBuilder.extractInnerHits(queryBuilder, innerHitBuilders);\n- for (InnerHitBuilder builder : innerHitBuilders.values()) {\n- builder.build(searchContext, searchContext.innerHits());\n- }\n- assertNotNull(searchContext.innerHits());\n- assertEquals(1, searchContext.innerHits().getInnerHits().size());\n- assertTrue(searchContext.innerHits().getInnerHits().containsKey(queryBuilder.innerHit().getName()));\n- InnerHitsContext.BaseInnerHits innerHits =\n- searchContext.innerHits().getInnerHits().get(queryBuilder.innerHit().getName());\n- assertEquals(innerHits.size(), queryBuilder.innerHit().getSize());\n- assertEquals(innerHits.sort().sort.getSort().length, 1);\n- assertEquals(innerHits.sort().sort.getSort()[0].getField(), STRING_FIELD_NAME_2);\n- } else {\n- assertThat(searchContext.innerHits().getInnerHits().size(), equalTo(0));\n+ Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();\n+ InnerHitBuilder.extractInnerHits(queryBuilder, innerHitBuilders);\n+ for (InnerHitBuilder builder : innerHitBuilders.values()) {\n+ builder.build(searchContext, searchContext.innerHits());\n }\n+ assertNotNull(searchContext.innerHits());\n+ assertEquals(1, searchContext.innerHits().getInnerHits().size());\n+ assertTrue(searchContext.innerHits().getInnerHits().containsKey(queryBuilder.innerHit().getName()));\n+ InnerHitsContext.BaseInnerHits innerHits =\n+ searchContext.innerHits().getInnerHits().get(queryBuilder.innerHit().getName());\n+ assertEquals(innerHits.size(), queryBuilder.innerHit().getSize());\n+ 
assertEquals(innerHits.sort().sort.getSort().length, 1);\n+ assertEquals(innerHits.sort().sort.getSort()[0].getField(), STRING_FIELD_NAME_2);\n }\n }\n \n@@ -315,6 +320,17 @@ public void testUnknownObjectException() throws IOException {\n }\n }\n \n+ @Override\n+ public void testMustRewrite() throws IOException {\n+ try {\n+ super.testMustRewrite();\n+ } catch (UnsupportedOperationException e) {\n+ if (requiresRewrite == false) {\n+ throw e;\n+ }\n+ }\n+ }\n+\n public void testNonDefaultSimilarity() throws Exception {\n QueryShardContext shardContext = createShardContext();\n HasChildQueryBuilder hasChildQueryBuilder = QueryBuilders.hasChildQuery(CHILD_TYPE, new TermQueryBuilder(\"custom_string\", \"value\"), ScoreMode.None);", "filename": "core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -58,6 +58,8 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase<HasParentQ\n protected static final String PARENT_TYPE = \"parent\";\n protected static final String CHILD_TYPE = \"child\";\n \n+ boolean requiresRewrite = false;\n+\n @Override\n protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {\n mapperService.merge(PARENT_TYPE, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(PARENT_TYPE,\n@@ -88,8 +90,12 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws\n */\n @Override\n protected HasParentQueryBuilder doCreateTestQueryBuilder() {\n- HasParentQueryBuilder hqb = new HasParentQueryBuilder(PARENT_TYPE,\n- RandomQueryBuilder.createQuery(random()),randomBoolean());\n+ QueryBuilder innerQueryBuilder = RandomQueryBuilder.createQuery(random());\n+ if (randomBoolean()) {\n+ requiresRewrite = true;\n+ innerQueryBuilder = new WrapperQueryBuilder(innerQueryBuilder.toString());\n+ }\n+ HasParentQueryBuilder hqb = new HasParentQueryBuilder(PARENT_TYPE, innerQueryBuilder, randomBoolean());\n if (randomBoolean()) {\n hqb.innerHit(new InnerHitBuilder()\n .setName(randomAsciiOfLengthBetween(1, 10))\n@@ -107,25 +113,25 @@ protected void doAssertLuceneQuery(HasParentQueryBuilder queryBuilder, Query que\n assertEquals(queryBuilder.score() ? 
ScoreMode.Max : ScoreMode.None, lpq.getScoreMode());\n \n if (queryBuilder.innerHit() != null) {\n+ // have to rewrite again because the provided queryBuilder hasn't been rewritten (directly returned from\n+ // doCreateTestQueryBuilder)\n+ queryBuilder = (HasParentQueryBuilder) queryBuilder.rewrite(context);\n+\n SearchContext searchContext = SearchContext.current();\n assertNotNull(searchContext);\n- if (query != null) {\n- Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();\n- InnerHitBuilder.extractInnerHits(queryBuilder, innerHitBuilders);\n- for (InnerHitBuilder builder : innerHitBuilders.values()) {\n- builder.build(searchContext, searchContext.innerHits());\n- }\n- assertNotNull(searchContext.innerHits());\n- assertEquals(1, searchContext.innerHits().getInnerHits().size());\n- assertTrue(searchContext.innerHits().getInnerHits().containsKey(queryBuilder.innerHit().getName()));\n- InnerHitsContext.BaseInnerHits innerHits = searchContext.innerHits()\n- .getInnerHits().get(queryBuilder.innerHit().getName());\n- assertEquals(innerHits.size(), queryBuilder.innerHit().getSize());\n- assertEquals(innerHits.sort().sort.getSort().length, 1);\n- assertEquals(innerHits.sort().sort.getSort()[0].getField(), STRING_FIELD_NAME_2);\n- } else {\n- assertThat(searchContext.innerHits().getInnerHits().size(), equalTo(0));\n+ Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();\n+ InnerHitBuilder.extractInnerHits(queryBuilder, innerHitBuilders);\n+ for (InnerHitBuilder builder : innerHitBuilders.values()) {\n+ builder.build(searchContext, searchContext.innerHits());\n }\n+ assertNotNull(searchContext.innerHits());\n+ assertEquals(1, searchContext.innerHits().getInnerHits().size());\n+ assertTrue(searchContext.innerHits().getInnerHits().containsKey(queryBuilder.innerHit().getName()));\n+ InnerHitsContext.BaseInnerHits innerHits = searchContext.innerHits()\n+ .getInnerHits().get(queryBuilder.innerHit().getName());\n+ assertEquals(innerHits.size(), queryBuilder.innerHit().getSize());\n+ assertEquals(innerHits.sort().sort.getSort().length, 1);\n+ assertEquals(innerHits.sort().sort.getSort()[0].getField(), STRING_FIELD_NAME_2);\n }\n }\n \n@@ -206,6 +212,17 @@ public void testUnknownObjectException() throws IOException {\n }\n }\n \n+ @Override\n+ public void testMustRewrite() throws IOException {\n+ try {\n+ super.testMustRewrite();\n+ } catch (UnsupportedOperationException e) {\n+ if (requiresRewrite == false) {\n+ throw e;\n+ }\n+ }\n+ }\n+\n public void testFromJson() throws IOException {\n String json =\n \"{\\n\" +", "filename": "core/src/test/java/org/elasticsearch/index/query/HasParentQueryBuilderTests.java", "status": "modified" }, { "diff": "@@ -49,6 +49,8 @@\n \n public class NestedQueryBuilderTests extends AbstractQueryTestCase<NestedQueryBuilder> {\n \n+ boolean requiresRewrite = false;\n+\n @Override\n protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {\n mapperService.merge(\"nested_doc\", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(\"nested_doc\",\n@@ -68,7 +70,12 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws\n */\n @Override\n protected NestedQueryBuilder doCreateTestQueryBuilder() {\n- NestedQueryBuilder nqb = new NestedQueryBuilder(\"nested1\", RandomQueryBuilder.createQuery(random()),\n+ QueryBuilder innerQueryBuilder = RandomQueryBuilder.createQuery(random());\n+ if (randomBoolean()) {\n+ requiresRewrite = true;\n+ innerQueryBuilder = new 
WrapperQueryBuilder(innerQueryBuilder.toString());\n+ }\n+ NestedQueryBuilder nqb = new NestedQueryBuilder(\"nested1\", innerQueryBuilder,\n RandomPicks.randomFrom(random(), ScoreMode.values()));\n if (randomBoolean()) {\n nqb.innerHit(new InnerHitBuilder()\n@@ -87,24 +94,24 @@ protected void doAssertLuceneQuery(NestedQueryBuilder queryBuilder, Query query,\n ToParentBlockJoinQuery parentBlockJoinQuery = (ToParentBlockJoinQuery) query;\n // TODO how to assert this?\n if (queryBuilder.innerHit() != null) {\n+ // have to rewrite again because the provided queryBuilder hasn't been rewritten (directly returned from\n+ // doCreateTestQueryBuilder)\n+ queryBuilder = (NestedQueryBuilder) queryBuilder.rewrite(context);\n+\n SearchContext searchContext = SearchContext.current();\n assertNotNull(searchContext);\n- if (query != null) {\n- Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();\n- InnerHitBuilder.extractInnerHits(queryBuilder, innerHitBuilders);\n- for (InnerHitBuilder builder : innerHitBuilders.values()) {\n- builder.build(searchContext, searchContext.innerHits());\n- }\n- assertNotNull(searchContext.innerHits());\n- assertEquals(1, searchContext.innerHits().getInnerHits().size());\n- assertTrue(searchContext.innerHits().getInnerHits().containsKey(queryBuilder.innerHit().getName()));\n- InnerHitsContext.BaseInnerHits innerHits = searchContext.innerHits().getInnerHits().get(queryBuilder.innerHit().getName());\n- assertEquals(innerHits.size(), queryBuilder.innerHit().getSize());\n- assertEquals(innerHits.sort().sort.getSort().length, 1);\n- assertEquals(innerHits.sort().sort.getSort()[0].getField(), INT_FIELD_NAME);\n- } else {\n- assertThat(searchContext.innerHits().getInnerHits().size(), equalTo(0));\n+ Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();\n+ InnerHitBuilder.extractInnerHits(queryBuilder, innerHitBuilders);\n+ for (InnerHitBuilder builder : innerHitBuilders.values()) {\n+ builder.build(searchContext, searchContext.innerHits());\n }\n+ assertNotNull(searchContext.innerHits());\n+ assertEquals(1, searchContext.innerHits().getInnerHits().size());\n+ assertTrue(searchContext.innerHits().getInnerHits().containsKey(queryBuilder.innerHit().getName()));\n+ InnerHitsContext.BaseInnerHits innerHits = searchContext.innerHits().getInnerHits().get(queryBuilder.innerHit().getName());\n+ assertEquals(innerHits.size(), queryBuilder.innerHit().getSize());\n+ assertEquals(innerHits.sort().sort.getSort().length, 1);\n+ assertEquals(innerHits.sort().sort.getSort()[0].getField(), INT_FIELD_NAME);\n }\n }\n \n@@ -199,6 +206,17 @@ public void testUnknownObjectException() throws IOException {\n }\n }\n \n+ @Override\n+ public void testMustRewrite() throws IOException {\n+ try {\n+ super.testMustRewrite();\n+ } catch (UnsupportedOperationException e) {\n+ if (requiresRewrite == false) {\n+ throw e;\n+ }\n+ }\n+ }\n+\n public void testIgnoreUnmapped() throws IOException {\n final NestedQueryBuilder queryBuilder = new NestedQueryBuilder(\"unmapped\", new MatchAllQueryBuilder(), ScoreMode.None);\n queryBuilder.ignoreUnmapped(true);", "filename": "core/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java", "status": "modified" } ] }
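Because the inner-hit definition now picks up the rewritten inner query, combining a rewritable query such as a date `range` with `inner_hits` should behave consistently. The sketch below reuses the `sandbox` parent/child mapping from issue #19353 above; the `inner_hits` block is an illustrative addition and was not part of the original report:

```
GET /sandbox/questions/_search
{
  "query": {
    "has_child": {
      "type": "answers",
      "query": {
        "range": { "date": { "gte": "2016-04-01" } }
      },
      "inner_hits": {}
    }
  }
}
```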
{ "body": "**Elasticsearch version**: 2.2.2\n\n**JVM version**: 1.8.0_25\n\n**OS version**: OS X 10.11.5\n\n**Description of the problem including expected versus actual behavior**: \nFast Vector Highlighter doesn't highlight nested fields, whereas Plain Highlighter does (but has a [fragment_size bug](https://github.com/elastic/elasticsearch/issues/9442) so I can't use it). I would expect FVH to be able to highlight everything.\n\n**Steps to reproduce**:\n\n``` sh\ncurl -XPUT 'localhost:9200/nested_fvh?pretty' -d '{\n \"mappings\": {\n \"type1\": {\n \"properties\": {\n \"nested1\": {\n \"type\": \"nested\",\n \"properties\": {\n \"field1\": { \n \"type\": \"string\", \n \"term_vector\" : \"with_positions_offsets\" \n }\n }\n }\n }\n }\n }\n}\n'\n\ncurl -XPUT 'localhost:9200/nested_fvh/type1/1?pretty' -d '{\n \"nested1\": {\n \"field1\": \"Hello World!\"\n }\n}\n'\n\n\ncurl -XGET 'http://localhost:9200/nested_fvh/type1/_search?pretty' -d '{\n \"query\": {\n \"nested\": {\n \"path\": \"nested1\",\n \"query\": {\n \"match\": {\n \"nested1.field1\": \"hello\"\n }\n }\n }\n },\n \"highlight\": {\n \"fields\": {\n \"nested1.field1\": { \n \"type\": \"fvh\"\n }\n }\n }\n}\n'\n```\n\n**Output (fvh)**:\n<details>\n<summary>\n\nFVH doesn't highlight nested</summary>\n\n\n\n``` json\n{\n \"took\" : 3,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 5,\n \"successful\" : 5,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : 1,\n \"max_score\" : 0.4231198,\n \"hits\" : [ {\n \"_index\" : \"nested_fvh\",\n \"_type\" : \"type1\",\n \"_id\" : \"1\",\n \"_score\" : 0.4231198,\n \"_source\" : {\n \"nested1\" : {\n \"field1\" : \"Hello World!\"\n }\n }\n } ]\n }\n}\n```\n\n</details>\n\n**Output (plain)**:\n<details>\n<summary>\n\nPlain does highlight nested</summary>\n\n\n\n``` json\n{\n \"took\" : 3,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 5,\n \"successful\" : 5,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : 1,\n \"max_score\" : 0.4231198,\n \"hits\" : [ {\n \"_index\" : \"nested_fvh\",\n \"_type\" : \"type1\",\n \"_id\" : \"1\",\n \"_score\" : 0.4231198,\n \"_source\" : {\n \"nested1\" : {\n \"field1\" : \"Hello World!\"\n }\n },\n \"highlight\" : {\n \"nested1.field1\" : [ \"<em>Hello</em> World!\" ]\n }\n } ]\n }\n}\n```\n\n</details>\n", "comments": [ { "body": "(Oooh `<details>` tags! 👍 )\n\nThis should have been fixed with https://issues.apache.org/jira/browse/LUCENE-5929 but apparently it is still not working, neither in 2.3 nor in master.\n\n@martijnvg could you take a look please?\n", "created_at": "2016-07-05T11:20:24Z" }, { "body": "@clintongormley @martijnvg i have a similar case. Looks like when position offset is set, the highlighting doesn't work as expected. 
I have a full repro case that can be used in SENSE:\n\n```\nDELETE test\nPUT test\n{\n \"mappings\": {\n \"test_type\": {\n \"properties\": {\n \"nested_field\": {\n \"type\": \"nested\",\n \"properties\": {\n \"text\": {\n \"type\": \"string\",\n \"term_vector\": \"with_positions_offsets\",\n \"fields\": {\n \"raw\": {\n \"type\": \"string\",\n \"term_vector\": \"with_positions_offsets\"\n }\n }\n }\n }\n }\n }\n }\n }\n} \n\nPOST test/test_type\n{\n \"nested_field\": [\n {\n \"text\": \"text field\"\n }\n ]\n}\n\nPOST test/_search\n{\n \"query\": {\n \"nested\": {\n \"query\": {\n \"query_string\": {\n \"query\": \"text\",\n \"fields\": [\n \"nested_field.text.raw\"\n ]\n }\n },\n \"path\": \"nested_field\"\n }\n },\n \"highlight\": {\n \"fields\": {\n \"nested_field.text.raw\": {}\n }\n },\n \"fielddata_fields\": [\"nested_field.text.raw\"]\n}\n```\n\nCan you please confirm that it's related to this bug?\n\nJust to clarify, it's not related to multi_field. If you just use the text property you will get the same behaviour.\n\nIndeed, if you remove `\"term_vector\": \"with_positions_offsets\"` then it starts working. Must be related to this.\n", "created_at": "2016-07-06T15:26:37Z" }, { "body": "> Indeed, if you remove \"term_vector\": \"with_positions_offsets\" then it starts working. Must be related to this.\n\n@gmoskovicz that's because it uses the plain highlighter if term offsets are disabled.\n", "created_at": "2016-07-06T15:42:17Z" }, { "body": "@clintongormley @sfcgeorge @gmoskovicz I wonder if we should support highlighting on `nested`, `has_child` and `has_parent` query at all? This isn't the first problem that the highlighters have with these queries. I think instead we should promote the use of inner hits more. With inner hits highlighting the nested object work and on top of this it is more accurate too (since each nested object will be highlighted in isolation):\n\n``` json\n{\n \"query\": {\n \"nested\": {\n \"path\": \"nested1\",\n \"query\": {\n \"match\": {\n \"nested1.field1\": \"hello\"\n }\n },\n \"inner_hits\": {\n \"highlight\": {\n \"fields\": {\n \"nested1.field1\": {\n \"type\": \"fvh\"\n }\n }\n }\n }\n }\n }\n}\n```\n", "created_at": "2016-07-07T07:02:40Z" }, { "body": "@martijnvg That feels like making the programmer do something the system should be doing. I get that the implementation is hard because Lucene etc etc but ideally I'd like nested fields to be more seamless and easy to use, not require even more effort.\n\nUsing the global highlights as I currently am makes mapping the match back to the original object in my ORM very difficult. Using `inner_hits` would make that much easier BUT it doesn't work with searching `_all`. I need to be able to search `_all` so can't use `inner_hits` unfortunately. It's not ideal, but I'm stuck with global highlight, no `fragment_size` and `plain` highlighter for now.\n", "created_at": "2016-07-07T09:06:18Z" }, { "body": "@sfcgeorge I see, if you just query the `_all` field than using highlighting via `inner_hits` for that isn't very straightforward. However I do think that if fields inside nested objects are queried specifically then using highlighting via inner_hits should be used.\n", "created_at": "2016-07-07T09:20:42Z" }, { "body": "@martijnvg We need a Google-like search box that searches and highlights `_all`, but also advanced search on specific fields and nested fields, at the same time. If `_all` could percolate highlighting to `inner_hits` that would be perfect and much easier, but I don't think it can work. 
\n\n<details>\n<summary>\n\nMy failed attempts:</summary>\n\n\nBoth return correct result but no highlighting. \n\nHere I guess the `match_all` is stopping the highlighting.\n\n``` sh\ncurl -XGET 'http://localhost:9200/nested_fvh/type1/_search?pretty' -d '{\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"simple_query_string\": {\n \"query\": \"hello\"\n }\n },\n {\n \"nested\": {\n \"path\": \"nested1\",\n \"filter\": {\n \"match_all\": {}\n },\n \"inner_hits\": {\n \"highlight\": {\n \"require_field_match\": false,\n \"fields\": {\n \"nested1.field1\": {\n \"type\": \"plain\"\n }\n }\n }\n }\n }\n }\n ]\n }\n }\n}\n'\n```\n\nGlobal inner hits sounded promising, this seems like it would be a great use-case for it if it worked. \n\n``` sh\ncurl -XGET 'http://localhost:9200/nested_fvh/type1/_search?pretty' -d '{\n \"query\": {\n \"simple_query_string\": {\n \"query\": \"hello\"\n }\n },\n \"inner_hits\": {\n \"inner_hits_name1\": {\n \"path\": { \n \"nested1\": {\n \"highlight\": {\n \"require_field_match\": false,\n \"fields\": {\n \"nested1.field1\": {\n \"type\": \"plain\"\n }\n }\n }\n }\n }\n }\n }\n}\n'\n```\n\n</details>\n", "created_at": "2016-07-07T10:05:12Z" }, { "body": "The FVH requires positions and offsets to be indexed, but highlighting is performed at the top level document and the positions and offsets for nested documents are inside the nested documents, so highlighting can't access them.\n\nSecondly, highlighting nested documents at the top level will produce incorrect results, eg:\n\n<details>\n\n```\nPUT t\n{\n \"mappings\": {\n \"t\": {\n \"properties\": {\n \"foo\": {\n \"type\": \"nested\",\n \"properties\": {\n \"text\": {\n \"type\": \"text\"\n },\n \"num\": {\n \"type\": \"integer\"\n }\n }\n }\n }\n }\n }\n}\n\nPUT t/t/1\n{\n \"foo\": [\n {\n \"text\": \"brown\",\n \"num\": 1\n },\n {\n \"text\": \"cow\",\n \"num\": 2\n }\n ]\n}\n\nGET t/_search\n{\n \"query\": {\n \"nested\": {\n \"path\": \"foo\",\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"match\": {\n \"foo.text\": \"brown cow\"\n }\n },\n {\n \"match\": {\n \"foo.num\": 1\n }\n }\n ]\n }\n }\n }\n },\n \"highlight\": {\n \"fields\": {\n \"foo.text\": {}\n }\n }\n}\n```\n\n</details>\n\nreturns highlight snippets `brown` and `cow`, while `cow` shouldn't have been highlighted. 
Highlighting with inner hits works correctly:\n\n<details>\n\n```\nGET t/_search\n{\n \"query\": {\n \"nested\": {\n \"path\": \"foo\",\n \"inner_hits\": {\n \"_source\": false,\n \"highlight\": {\n \"fields\": {\n \"foo.text\": {}\n }\n }\n },\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"match\": {\n \"foo.text\": \"brown cow\"\n }\n },\n {\n \"match\": {\n \"foo.num\": 1\n }\n }\n ]\n }\n }\n }\n }\n}\n```\n\n</details>\n\nThat said, if you want to be able to use the FVH on nested fields at the top level (with the incorrect results), then you should be able to use `copy_to` to copy the nested values into a top-level field and highlight on that:\n\n<details>\n\n```\nPUT t\n{\n \"mappings\": {\n \"t\": {\n \"properties\": {\n \"foo\": {\n \"type\": \"nested\",\n \"properties\": {\n \"text\": {\n \"type\": \"text\",\n \"copy_to\": \"foo_text\"\n }\n }\n },\n \"foo_text\": {\n \"type\": \"text\",\n \"term_vector\": \"with_positions_offsets\",\n \"store\": true\n }\n }\n }\n }\n}\n\nPUT t/t/1\n{\n \"foo\": [\n {\n \"text\": \"brown\"\n },\n {\n \"text\": \"cow\"\n }\n ]\n}\n\nGET t/_search\n{\n \"query\": {\n \"nested\": {\n \"path\": \"foo\",\n \"query\": {\n \"match\": {\n \"foo.text\": \"brown cow\"\n }\n }\n }\n },\n \"highlight\": {\n \"require_field_match\": false,\n \"fields\": {\n \"foo_text\": {\n \"type\": \"fvh\"\n }\n }\n }\n}\n```\n\n</details>\n\nUnfortunately, this doesn't work for some reason. It works with the `plain` highlighter but not with `fvh`. This IS a bug and @martijnvg is going to investigate.\n", "created_at": "2016-07-07T10:32:20Z" }, { "body": "Hi @martijnvg,\r\n\r\nI know that this issue was solved at #19337, but I still have exactly the same problem as the first comment.\r\nHowever, I put highlight into nested inner_hits, it worked.\r\nDue to the need of multiple query for same nested path, it needs to give different names of inner_hits for each. It's quite inconvenient to extract all highlight results.\r\nI'm wondering if it can use under global highlight. I'm using version 7.6.1.\r\nHope someone can help. Thanks in advance.", "created_at": "2020-10-15T09:53:12Z" } ], "number": 19265, "title": "FVH doesn't highlight nested fields" }
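As the comments above conclude, highlighting nested fields is better done through inner hits, where each nested document is highlighted in isolation (avoiding false positives like the `cow` snippet shown earlier). A hedged Java sketch of that approach (it assumes the 5.x `InnerHitBuilder#setHighlightBuilder` setter and an existing `Client` named `client`; index and field names are taken from the reproduction above):

```java
import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.InnerHitBuilder;
import org.elasticsearch.search.highlight.HighlightBuilder;

import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.index.query.QueryBuilders.nestedQuery;

class NestedInnerHitHighlight {
    // Highlight inside the nested documents themselves instead of at the top-level document.
    static SearchResponse search(Client client) {
        return client.prepareSearch("nested_fvh")
                .setQuery(nestedQuery("nested1", matchQuery("nested1.field1", "hello"), ScoreMode.None)
                        .innerHit(new InnerHitBuilder()
                                .setHighlightBuilder(new HighlightBuilder().field("nested1.field1"))))
                .get();
    }
}
```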
{ "body": "PR for #19265\n", "number": 19337, "review_comments": [], "title": "Let fast vector highlighter also extract terms from the nested query's inner query." }
{ "commits": [ { "message": "fvh: Also extract terms from the nested query' inner query.\n\nCloses #19265" } ], "files": [ { "diff": "@@ -27,6 +27,7 @@\n import org.apache.lucene.search.PhraseQuery;\n import org.apache.lucene.search.Query;\n import org.apache.lucene.search.TermQuery;\n+import org.apache.lucene.search.join.ToParentBlockJoinQuery;\n import org.apache.lucene.search.spans.SpanTermQuery;\n import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;\n import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;\n@@ -71,6 +72,9 @@ void flatten(Query sourceQuery, IndexReader reader, Collection<Query> flatQuerie\n } else if (sourceQuery instanceof BlendedTermQuery) {\n final BlendedTermQuery blendedTermQuery = (BlendedTermQuery) sourceQuery;\n flatten(blendedTermQuery.rewrite(reader), reader, flatQueries, boost);\n+ } else if (sourceQuery instanceof ToParentBlockJoinQuery) {\n+ ToParentBlockJoinQuery blockJoinQuery = (ToParentBlockJoinQuery) sourceQuery;\n+ flatten(blockJoinQuery.getChildQuery(), reader, flatQueries, boost);\n } else {\n super.flatten(sourceQuery, reader, flatQueries, boost);\n }", "filename": "core/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java", "status": "modified" }, { "diff": "@@ -19,10 +19,12 @@\n package org.elasticsearch.search.highlight;\n \n import com.carrotsearch.randomizedtesting.generators.RandomPicks;\n+import org.apache.lucene.search.join.ScoreMode;\n import org.elasticsearch.Version;\n import org.elasticsearch.action.index.IndexRequestBuilder;\n import org.elasticsearch.action.search.SearchRequestBuilder;\n import org.elasticsearch.action.search.SearchResponse;\n+import org.elasticsearch.action.support.WriteRequest;\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.settings.Settings.Builder;\n@@ -64,6 +66,7 @@\n import static org.elasticsearch.index.query.QueryBuilders.matchPhraseQuery;\n import static org.elasticsearch.index.query.QueryBuilders.matchQuery;\n import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery;\n+import static org.elasticsearch.index.query.QueryBuilders.nestedQuery;\n import static org.elasticsearch.index.query.QueryBuilders.prefixQuery;\n import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;\n import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;\n@@ -2649,4 +2652,43 @@ public void testStringFieldHighlighting() throws IOException {\n assertThat(search.getHits().totalHits(), equalTo(1L));\n assertThat(search.getHits().getAt(0).getHighlightFields().get(\"string_field\").getFragments()[0].string(), equalTo(\"<em>some</em> <em>text</em>\"));\n }\n+\n+ public void testACopyFieldWithNestedQuery() throws Exception {\n+ String mapping = jsonBuilder().startObject().startObject(\"type\").startObject(\"properties\")\n+ .startObject(\"foo\")\n+ .field(\"type\", \"nested\")\n+ .startObject(\"properties\")\n+ .startObject(\"text\")\n+ .field(\"type\", \"text\")\n+ .field(\"copy_to\", \"foo_text\")\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .startObject(\"foo_text\")\n+ .field(\"type\", \"text\")\n+ .field(\"term_vector\", \"with_positions_offsets\")\n+ .field(\"store\", true)\n+ .endObject()\n+ .endObject().endObject().endObject().string();\n+ prepareCreate(\"test\").addMapping(\"type\", mapping).get();\n+\n+ client().prepareIndex(\"test\", \"type\", \"1\").setSource(jsonBuilder().startObject().startArray(\"foo\")\n+ 
.startObject().field(\"text\", \"brown\").endObject()\n+ .startObject().field(\"text\", \"cow\").endObject()\n+ .endArray().endObject())\n+ .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)\n+ .get();\n+\n+ SearchResponse searchResponse = client().prepareSearch()\n+ .setQuery(nestedQuery(\"foo\", matchQuery(\"foo.text\", \"brown cow\"), ScoreMode.None))\n+ .highlighter(new HighlightBuilder()\n+ .field(new Field(\"foo_text\").highlighterType(\"fvh\"))\n+ .requireFieldMatch(false))\n+ .get();\n+ assertHitCount(searchResponse, 1);\n+ HighlightField field = searchResponse.getHits().getAt(0).highlightFields().get(\"foo_text\");\n+ assertThat(field.getFragments().length, equalTo(2));\n+ assertThat(field.getFragments()[0].string(), equalTo(\"<em>brown</em>\"));\n+ assertThat(field.getFragments()[1].string(), equalTo(\"<em>cow</em>\"));\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java", "status": "modified" } ] }
{ "body": "**Elasticsearch version**:\n5.0.0-alpha4\n**JVM version**:\n1.8\n**OS version**:\nMac OS Sierra\n**Description of the problem including expected versus actual behavior**:\nThis is a very minor thing, the logline for logging requests is missing a slash. It looks like this:\n\n```\nrequest [GET http://127.0.0.1:9201_cluster/health] returned [HTTP/1.1 200 OK]\n```\n\nIt should be like this:\n\n```\nrequest [GET http://127.0.0.1:9201/_cluster/health] returned [HTTP/1.1 200 OK]\n```\n\nThe executed request is like this:\n\n```\n Response response = client.performRequest(\n \"GET\",\n \"_cluster/health\",\n new Hashtable<>(),\n null);\n```\n\nI do not enter the slash before cluster, but it works, so the logs should add the slash as well.\n", "comments": [], "number": 19314, "title": "Logline of request logger in java rest client missing slash" }
{ "body": "Rest Client: add slash to log line when missing between host and uri\n\nCloses #19314\n", "number": 19325, "review_comments": [], "title": "Rest Client: add slash to log line when missing between host and uri" }
{ "commits": [ { "message": "Rest Client: add slash to log line when missing between host and uri\n\nCloses #19314" } ], "files": [ { "diff": "@@ -26,6 +26,7 @@\n import org.apache.http.HttpEntityEnclosingRequest;\n import org.apache.http.HttpHost;\n import org.apache.http.HttpResponse;\n+import org.apache.http.RequestLine;\n import org.apache.http.client.methods.HttpUriRequest;\n import org.apache.http.entity.BufferedHttpEntity;\n import org.apache.http.entity.ContentType;\n@@ -55,7 +56,7 @@ private RequestLogger() {\n */\n static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpResponse httpResponse) {\n if (logger.isDebugEnabled()) {\n- logger.debug(\"request [\" + request.getMethod() + \" \" + host + request.getRequestLine().getUri() +\n+ logger.debug(\"request [\" + request.getMethod() + \" \" + host + getUri(request.getRequestLine()) +\n \"] returned [\" + httpResponse.getStatusLine() + \"]\");\n }\n if (tracer.isTraceEnabled()) {\n@@ -81,7 +82,7 @@ static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpR\n * Logs a request that failed\n */\n static void logFailedRequest(Log logger, HttpUriRequest request, HttpHost host, IOException e) {\n- logger.debug(\"request [\" + request.getMethod() + \" \" + host + request.getRequestLine().getUri() + \"] failed\", e);\n+ logger.debug(\"request [\" + request.getMethod() + \" \" + host + getUri(request.getRequestLine()) + \"] failed\", e);\n if (logger.isTraceEnabled()) {\n String traceRequest;\n try {\n@@ -98,7 +99,7 @@ static void logFailedRequest(Log logger, HttpUriRequest request, HttpHost host,\n * Creates curl output for given request\n */\n static String buildTraceRequest(HttpUriRequest request, HttpHost host) throws IOException {\n- String requestLine = \"curl -iX \" + request.getMethod() + \" '\" + host + request.getRequestLine().getUri() + \"'\";\n+ String requestLine = \"curl -iX \" + request.getMethod() + \" '\" + host + getUri(request.getRequestLine()) + \"'\";\n if (request instanceof HttpEntityEnclosingRequest) {\n HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request;\n if (enclosingRequest.getEntity() != null) {\n@@ -143,4 +144,11 @@ static String buildTraceResponse(HttpResponse httpResponse) throws IOException {\n }\n return responseLine;\n }\n+\n+ private static String getUri(RequestLine requestLine) {\n+ if (requestLine.getUri().charAt(0) != '/') {\n+ return \"/\" + requestLine.getUri();\n+ }\n+ return requestLine.getUri();\n+ }\n }", "filename": "client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java", "status": "modified" }, { "diff": "@@ -50,7 +50,14 @@ public class RequestLoggerTests extends RestClientTestCase {\n \n public void testTraceRequest() throws IOException, URISyntaxException {\n HttpHost host = new HttpHost(\"localhost\", 9200, getRandom().nextBoolean() ? 
\"http\" : \"https\");\n- URI uri = new URI(\"/index/type/_api\");\n+\n+ String expectedEndpoint = \"/index/type/_api\";\n+ URI uri;\n+ if (randomBoolean()) {\n+ uri = new URI(expectedEndpoint);\n+ } else {\n+ uri = new URI(\"index/type/_api\");\n+ }\n \n HttpRequestBase request;\n int requestType = RandomInts.randomIntBetween(getRandom(), 0, 7);\n@@ -83,7 +90,7 @@ public void testTraceRequest() throws IOException, URISyntaxException {\n throw new UnsupportedOperationException();\n }\n \n- String expected = \"curl -iX \" + request.getMethod() + \" '\" + host + uri + \"'\";\n+ String expected = \"curl -iX \" + request.getMethod() + \" '\" + host + expectedEndpoint + \"'\";\n boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean();\n String requestBody = \"{ \\\"field\\\": \\\"value\\\" }\";\n if (hasBody) {", "filename": "client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java", "status": "modified" } ] }
{ "body": "It happened during a rolling restart needed for a security upgrade. The cluster is running elastic 2.3.3.\nAll nodes are running the same JVM version (OpenJDK 64-Bit Server VM (build 24.95-b01, mixed mode)).\n\nA RemoteTransportException seemed to \"loop?\" between 2 nodes causing elastic to log bigger and bigger exception traces as a new RemoteException exception seemed to be created with the previous one carrying all its causes.\n\nThe first trace was (on elastic1045) : \n\n```\n[2016-06-30 08:34:20,553][WARN ][org.elasticsearch ] Exception cause unwrapping ran for 10 levels...\nRemoteTransportException[[elastic1036][10.64.16.45:9300][indices:data/write/bulk[s]]]; nested: RemoteTransportException[[elastic1045][10.64.48.143:9300][indices:data/write/bulk[s]]]; nested: RemoteTransportException[[elastic1036][10.64.16.45:9300][indices:data/write/bulk[s]]]; nested: RemoteTransportException[[elastic1045][10.64.48.143:9300][indices:data/write/bulk[s]]]; nested: RemoteTransportException[[elastic1036][10.64.16.45:9300][indices:data/write/bulk[s]]]; nested: RemoteTransportException[[elastic1045][10.64.48.143:9300][indices:data/write/bulk[s]]]; nested: RemoteTransportException[[elastic1036][10.64.16.45:9300][indices:data/write/bulk[s]]]; nested: RemoteTransportException[[elastic1045][10.64.48.143:9300][indices:data/write/bulk[s]]]; nested: RemoteTransportException[[elastic1036][10.64.16.45:9300][indices:data/write/bulk[s]]]; nested: RemoteTransportException[[elastic1045][10.64.48.143:9300][indices:data/write/bulk[s]]]; nested: RemoteTransportException[[elastic1036][10.64.16.45:9300][indices:data/write/bulk[s]]]; nested: RemoteTransportException[[elastic1036][10.64.16.45:9300][indices:data/write/bulk[s][p]]]; nested: IllegalIndexShardStateException[CurrentState[POST_RECOVERY] operation only allowed when started/recovering, origin [PRIMARY]];\n[[11 lines of Caused by: RemoteTransportException]]\nCaused by: [itwiki_general_1415230945][[itwiki_general_1415230945][2]] IllegalIndexShardStateException[CurrentState[POST_RECOVERY] operation only allowed when started/recovering, origin [PRIMARY]]\n at org.elasticsearch.index.shard.IndexShard.ensureWriteAllowed(IndexShard.java:1062)\n at org.elasticsearch.index.shard.IndexShard.index(IndexShard.java:593)\n at org.elasticsearch.index.engine.Engine$Index.execute(Engine.java:836)\n at org.elasticsearch.action.index.TransportIndexAction.executeIndexRequestOnPrimary(TransportIndexAction.java:237)\n at org.elasticsearch.action.bulk.TransportShardBulkAction.shardIndexOperation(TransportShardBulkAction.java:326)\n at org.elasticsearch.action.bulk.TransportShardBulkAction.shardUpdateOperation(TransportShardBulkAction.java:389)\n at org.elasticsearch.action.bulk.TransportShardBulkAction.shardOperationOnPrimary(TransportShardBulkAction.java:191)\n at org.elasticsearch.action.bulk.TransportShardBulkAction.shardOperationOnPrimary(TransportShardBulkAction.java:68)\n at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase.doRun(TransportReplicationAction.java:639)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\n at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryOperationTransportHandler.messageReceived(TransportReplicationAction.java:279)\n at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryOperationTransportHandler.messageReceived(TransportReplicationAction.java:271)\n at 
org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:75)\n at org.elasticsearch.transport.TransportService$4.doRun(TransportService.java:376)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\n```\n\nThe second one (same root cause) appeared few ms after with also 12 causes.\nThe third and fourth ones had 14 causes, fifth and sixth 16 causes and so on...\nThe last one I've seen had 1982 chained causes.\n\nThe logs were nearly the same on elastic1036 (master) generating 27gig of logs in few minutes on both nodes.\n\nSurprisingly the cluster was still performing relatively well with higher gc activity on these nodes.\n\nThen (maybe 1 hour after the first trace) elastic1045 was dropped from the cluster:\n\n```\n[2016-06-30 09:48:25,953][INFO ][discovery.zen ] [elastic1045] master_left [{elastic1036}{DUOG0aGqQ3Gajr_wcFTOyw}{10.64.16.45}{10.64.16.45:9300}{rack=B3, row=B, master=true}], reason [failed to ping, tried [3] times, each with maximum [30s] timeout]\n```\n\nIt was immediately re-added and the log flood stopped.\n\nI'll comment on this ticket if it happens again.\n", "comments": [ { "body": "I think that this is a manifestation of #12573. It can happen when the target node of a primary relocation takes a long time to apply the cluster state that contains the information that it has the new primary. It is fixed in the upcoming v5.0.0 (#16274). The question is why the node took so long to apply a cluster state update. Is there anything else in the logs that might indicate this? 
What is the time stamp of the last log entry that has one of those huge exception traces?\n", "created_at": "2016-06-30T15:16:26Z" }, { "body": "@ywelsch this is still unclear, the first trace was 08:34:20,553 and the last one was a StackOverflow at 09:48:19,128 : \n\n```\n[2016-06-30 09:48:19,128][WARN ][action.bulk ] [elastic1036] Failed to send response for indices:data/write/bulk[s]\njava.lang.StackOverflowError\n at java.lang.Exception.<init>(Exception.java:84)\n at java.lang.RuntimeException.<init>(RuntimeException.java:80)\n at org.elasticsearch.ElasticsearchException.<init>(ElasticsearchException.java:84)\n at org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.<init>(NotSerializableExceptionWrapper.java:41)\n at org.elasticsearch.common.io.stream.StreamOutput.writeThrowable(StreamOutput.java:560)\n at org.elasticsearch.ElasticsearchException.writeTo(ElasticsearchException.java:226)\n at org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.writeTo(NotSerializableExceptionWrapper.java:65)\n at org.elasticsearch.common.io.stream.StreamOutput.writeThrowable(StreamOutput.java:564)\n at org.elasticsearch.ElasticsearchException.writeTo(ElasticsearchException.java:226)\n at org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.writeTo(NotSerializableExceptionWrapper.java:65)\n at org.elasticsearch.common.io.stream.StreamOutput.writeThrowable(StreamOutput.java:564)\n at org.elasticsearch.ElasticsearchException.writeTo(ElasticsearchException.java:226)\n at org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.writeTo(NotSerializableExceptionWrapper.java:65)\n at org.elasticsearch.common.io.stream.StreamOutput.writeThrowable(StreamOutput.java:564)\n at org.elasticsearch.ElasticsearchException.writeTo(ElasticsearchException.java:226)\n at org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.writeTo(NotSerializableExceptionWrapper.java:65)\n at org.elasticsearch.common.io.stream.StreamOutput.writeThrowable(StreamOutput.java:564)\n at org.elasticsearch.ElasticsearchException.writeTo(ElasticsearchException.java:226)\n at org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.writeTo(NotSerializableExceptionWrapper.java:65)\n at org.elasticsearch.common.io.stream.StreamOutput.writeThrowable(StreamOutput.java:564)\n at org.elasticsearch.ElasticsearchException.writeTo(ElasticsearchException.java:226)\n\n```\n\nNote that It's happening again right now with two other nodes elastic1021 and elastic1036 (still master).\n\nUnfortunately keeping the logs is difficult (disk full).\n", "created_at": "2016-06-30T16:30:55Z" }, { "body": "It is tricky to verify that this is indeed #12573 (If so, we could think about backporting #16274). Once the exceptions start bubbling up, the nodes have up-to-date cluster states (i.e. the node with the primary relocation target now has the cluster state where primary relocation is completed). It’s just by unwinding the deep call stack of the recursive calls between the nodes where the exceptions are stacked on top of each other.\n\nIs the rolling restart of the cluster completed by now? 
If so, are there any shard relocations in progress?\n\n@bleskes thoughts?\n", "created_at": "2016-06-30T17:55:20Z" }, { "body": "Note that I'm pretty sure that the second time it happened the cluster was green but certainly with shards relocating.\n\nI've restarted the master to schedule a new election, we'll monitor the cluster state and comment this ticket with any new relevant info.\n\nI agree with you, I'm not sure that exception timestamp in the logs are relevant because it seems to be a recursion problem and most of the logs where generated by the circuit breaker in ExceptionsHelper#unwrapCause (I wonder if the same kind of circuit breaker should be added to the logger itself to avoid writing bazillions of Caused By lines).\n", "created_at": "2016-06-30T18:43:36Z" }, { "body": "Can you share a pair of subsequent log messages, one with `n` causes, and the next with `n + 2` causes showing the full stack trace for each log entry?\n", "created_at": "2016-07-06T15:29:46Z" }, { "body": "It happened again today.\nWhat seems to be clear is that it happens when the cluster goes back to green after a node restart thus when rebalacing starts.\nCluster settings:\n- node_concurrent_recoveries: 3\n- node_initial_primaries_recoveries: 3\n- cluster_concurrent_rebalance: 16\n- max_bytes_per_sec is pretty low at 20mb with 3 concurrent_streams (we encounter latency issues if we set more)\n\nIt's unclear to me how node_concurrent_recoveries and cluster_concurrent_rebalance interacts together. What happens if the cluster decide to rebalance more than 3 shards to the same node will node_concurrent_recoveries prevent this from happening?\n\nI think that what saves us from OOM is a StackOverflow when the huge exception is serialized.\n\n@jasontedor here is the first 4 log entries : (https://gist.github.com/nomoa/2ee1f8bb44a4c6c01c400787d66bc383)\n\nHere the pattern seems to be 2 with 13 cause, 2 with 15 causes and so on...\nThe last one I've seen in the log before filling up the disk seemed to have 1085 causes. This single log entry was 54m of text...\n", "created_at": "2016-07-06T17:51:06Z" }, { "body": "@nomoa I've back-ported the fix in #12573 to 2.4 (#19296). All information so far indicates that it is this issue you're experiencing. Unfortunately my back-port was too late to make it for 2.3.4. You will have to wait for 2.4.0 to test it out. In the meantime, I wonder if dedicated master nodes would help here. If I understand correctly, this issue appeared only when a primary shard on the master node was involved. As cluster states are first applied on all the other nodes before it's applied on the master node, and if cluster state application is slow (due to large number of indices / shards etc.), having dedicated master nodes might decrease the time in which cluster states are out of sync on the nodes holding the primary relocation source and target.\n\nMight also be interesting to increase logging level of \"org.elasticsearch.cluster.service\" to DEBUG to see how long nodes take to apply the cluster state (messages of the form \"processing [{}]: took {} done applying updated cluster_state\").\n", "created_at": "2016-07-07T15:30:04Z" }, { "body": "@ywelsch awesome, thanks for the backport.\n\nYes it always happened on shards where the master was involved, and if I understood correctly this specific issue could happen between two data nodes. 
Note that It's not the first time we suspect the master being too busy to act properly.\n\nMoving to a dedicated master node is on our todo list, thanks for the suggestions.\n", "created_at": "2016-07-07T15:49:55Z" }, { "body": "Happening for me as well. I have disabled logging for the time being. Waiting for ES 2.4.0 :)\n", "created_at": "2016-08-22T06:29:22Z" }, { "body": "Closed by https://github.com/elastic/elasticsearch/pull/19296\n\nOnce 2.4.0 is out, please ping on this ticket if you're still seeing the same issue.\n", "created_at": "2016-08-22T14:31:19Z" } ], "number": 19187, "title": "Nested RemoteTransportExceptions flood the logs and fill the disk" }
{ "body": "Backport of #16274 to 2.4.0.\nCloses #19187\n", "number": 19296, "review_comments": [], "title": "Prevent TransportReplicationAction to route request based on stale local routing table" }
{ "commits": [ { "message": "Prevent TransportReplicationAction to route request based on stale local routing table\n\nCloses #16274\nCloses #12573\nCloses #12574" } ], "files": [ { "diff": "@@ -19,6 +19,7 @@\n \n package org.elasticsearch.action.support.replication;\n \n+import org.elasticsearch.Version;\n import org.elasticsearch.action.ActionRequest;\n import org.elasticsearch.action.ActionRequestValidationException;\n import org.elasticsearch.action.IndicesRequest;\n@@ -58,6 +59,8 @@ public class ReplicationRequest<T extends ReplicationRequest<T>> extends ChildTa\n private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT;\n private volatile boolean canHaveDuplicates = false;\n \n+ private long routedBasedOnClusterVersion = 0;\n+\n public ReplicationRequest() {\n \n }\n@@ -170,6 +173,20 @@ public final T consistencyLevel(WriteConsistencyLevel consistencyLevel) {\n return (T) this;\n }\n \n+ /**\n+ * Sets the minimum version of the cluster state that is required on the next node before we redirect to another primary.\n+ * Used to prevent redirect loops, see also {@link TransportReplicationAction.ReroutePhase#doRun()}\n+ */\n+ @SuppressWarnings(\"unchecked\")\n+ T routedBasedOnClusterVersion(long routedBasedOnClusterVersion) {\n+ this.routedBasedOnClusterVersion = routedBasedOnClusterVersion;\n+ return (T) this;\n+ }\n+\n+ long routedBasedOnClusterVersion() {\n+ return routedBasedOnClusterVersion;\n+ }\n+\n @Override\n public ActionRequestValidationException validate() {\n ActionRequestValidationException validationException = null;\n@@ -192,6 +209,9 @@ public void readFrom(StreamInput in) throws IOException {\n index = in.readString();\n canHaveDuplicates = in.readBoolean();\n // no need to serialize threaded* parameters, since they only matter locally\n+ if (in.getVersion().onOrAfter(Version.V_2_4_0)) {\n+ routedBasedOnClusterVersion = in.readVLong();\n+ }\n }\n \n @Override\n@@ -207,6 +227,9 @@ public void writeTo(StreamOutput out) throws IOException {\n timeout.writeTo(out);\n out.writeString(index);\n out.writeBoolean(canHaveDuplicates);\n+ if (out.getVersion().onOrAfter(Version.V_2_4_0)) {\n+ out.writeVLong(routedBasedOnClusterVersion);\n+ }\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java", "status": "modified" }, { "diff": "@@ -479,6 +479,15 @@ protected void doRun() {\n }\n performAction(node, transportPrimaryAction, true);\n } else {\n+ if (state.version() < request.routedBasedOnClusterVersion()) {\n+ logger.trace(\"failed to find primary [{}] for request [{}] despite sender thinking it would be here. 
Local cluster state version [{}]] is older than on sending node (version [{}]), scheduling a retry...\", request.shardId(), request, state.version(), request.routedBasedOnClusterVersion());\n+ retryBecauseUnavailable(request.shardId(), \"failed to find primary as current cluster state with version [\" + state.version() + \"] is stale (expected at least [\" + request.routedBasedOnClusterVersion() + \"]\");\n+ return;\n+ } else {\n+ // chasing the node with the active primary for a second hop requires that we are at least up-to-date with the current cluster state version\n+ // this prevents redirect loops between two nodes when a primary was relocated and the relocation target is not aware that it is the active primary shard already.\n+ request.routedBasedOnClusterVersion(state.version());\n+ }\n if (logger.isTraceEnabled()) {\n logger.trace(\"send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}]\", actionName, request.shardId(), request, state.version(), primary.currentNodeId());\n }", "filename": "core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java", "status": "modified" }, { "diff": "@@ -38,6 +38,7 @@\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;\n import org.elasticsearch.cluster.metadata.MetaData;\n+import org.elasticsearch.cluster.node.DiscoveryNodes;\n import org.elasticsearch.cluster.routing.IndexShardRoutingTable;\n import org.elasticsearch.cluster.routing.ShardIterator;\n import org.elasticsearch.cluster.routing.ShardRouting;\n@@ -56,6 +57,7 @@\n import org.elasticsearch.index.shard.ShardId;\n import org.elasticsearch.index.shard.ShardNotFoundException;\n import org.elasticsearch.rest.RestStatus;\n+import org.elasticsearch.test.ESAllocationTestCase;\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.test.cluster.TestClusterService;\n import org.elasticsearch.test.transport.CapturingTransport;\n@@ -72,6 +74,7 @@\n \n import java.io.IOException;\n import java.util.ArrayList;\n+import java.util.Arrays;\n import java.util.HashMap;\n import java.util.HashSet;\n import java.util.List;\n@@ -217,6 +220,59 @@ public void testNotStartedPrimary() throws InterruptedException, ExecutionExcept\n assertIndexShardCounter(1);\n }\n \n+ /**\n+ * When relocating a primary shard, there is a cluster state update at the end of relocation where the active primary is switched from\n+ * the relocation source to the relocation target. If relocation source receives and processes this cluster state\n+ * before the relocation target, there is a time span where relocation source believes active primary to be on\n+ * relocation target and relocation target believes active primary to be on relocation source. 
This results in replication\n+ * requests being sent back and forth.\n+ *\n+ * This test checks that replication request is not routed back from relocation target to relocation source in case of\n+ * stale index routing table on relocation target.\n+ */\n+ @Test\n+ public void testNoRerouteOnStaleClusterState() throws InterruptedException, ExecutionException {\n+ final String index = \"test\";\n+ final ShardId shardId = new ShardId(index, 0);\n+ ClusterState state = state(index, true, ShardRoutingState.RELOCATING);\n+ IndexShardRoutingTable shardRoutingTable = state.getRoutingTable().shardRoutingTable(shardId.getIndex(), shardId.id());\n+ String relocationTargetNode = shardRoutingTable.primaryShard().relocatingNodeId();\n+ state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(relocationTargetNode)).build();\n+ clusterService.setState(state);\n+ logger.debug(\"--> relocation ongoing state:\\n{}\", clusterService.state().prettyPrint());\n+\n+ Request request = new Request(shardId).timeout(\"1ms\").routedBasedOnClusterVersion(clusterService.state().version() + 1);\n+ PlainActionFuture<Response> listener = new PlainActionFuture<>();\n+ TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(null, request, listener);\n+ reroutePhase.run();\n+ assertListenerThrows(\"cluster state too old didn't cause a timeout\", listener, UnavailableShardsException.class);\n+\n+ request = new Request(shardId).routedBasedOnClusterVersion(clusterService.state().version() + 1);\n+ listener = new PlainActionFuture<>();\n+ reroutePhase = action.new ReroutePhase(null, request, listener);\n+ reroutePhase.run();\n+ assertFalse(\"cluster state too old didn't cause a retry\", listener.isDone());\n+\n+ // finish relocation\n+ shardRoutingTable = clusterService.state().getRoutingTable().shardRoutingTable(shardId.getIndex(), shardId.id());\n+ ShardRouting relocationTarget = shardRoutingTable.shardsWithState(ShardRoutingState.INITIALIZING).get(0);\n+ AllocationService allocationService = ESAllocationTestCase.createAllocationService();\n+ RoutingAllocation.Result result = allocationService.applyStartedShards(state, Arrays.asList(relocationTarget));\n+ ClusterState updatedState = ClusterState.builder(clusterService.state()).routingResult(result).build();\n+\n+ clusterService.setState(updatedState);\n+ logger.debug(\"--> relocation complete state:\\n{}\", clusterService.state().prettyPrint());\n+\n+ shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());\n+ final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId();\n+ final List<CapturingTransport.CapturedRequest> capturedRequests =\n+ transport.capturedRequestsByTargetNode().get(primaryNodeId);\n+ assertThat(capturedRequests, notNullValue());\n+ assertThat(capturedRequests.size(), equalTo(1));\n+ assertThat(capturedRequests.get(0).action, equalTo(\"testAction[p]\"));\n+ assertIndexShardCounter(1);\n+ }\n+\n @Test\n public void testUnknownIndexOrShardOnReroute() throws InterruptedException {\n final String index = \"test\";", "filename": "core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java", "status": "modified" } ] }
{ "body": "Relates to #12573\n\nWhen relocating a primary shard, there is a cluster state update at the end of relocation where the active primary is switched from the relocation source to the relocation target. If relocation source receives and processes this cluster state before the relocation target, there is a time span where relocation source believes active primary to be on relocation target and relocation target believes active primary to be on relocation source. This results in index/delete/flush requests being sent back and forth and can end in an OOM on both nodes.\n\nThis PR adds a field to the index/delete/flush request that helps detect the case where we locally have stale routing information. In case this staleness is detected, we wait until we have received an up-to-date cluster state before rerouting the request.\n\nI have included the test from #12574 in this PR to demonstrate the fix in an integration test. That integration test will not be part of the final commit, however.\n", "comments": [ { "body": "@bleskes instead of using the cluster state version, we could as well use the index metadata version. The index metadata version is updated whenever a new shard is started (thanks to active allocation ids). wdyt?\n\nOn a related note, we could use this field as well to wait for dynamic mapping updates to be applied. (for that the update mappings api would have to return the current index metadata version).\n", "created_at": "2016-01-27T18:25:13Z" }, { "body": "@bleskes renamed the field and removed integration test.\n", "created_at": "2016-02-01T17:43:44Z" }, { "body": "LGTM . Thanks @ywelsch - Left some minor comments, no need for another cycle.\n", "created_at": "2016-02-01T17:53:26Z" } ], "number": 16274, "title": "Prevent TransportReplicationAction to route request based on stale local routing table" }
{ "body": "Backport of #16274 to 2.4.0.\nCloses #19187\n", "number": 19296, "review_comments": [], "title": "Prevent TransportReplicationAction to route request based on stale local routing table" }
{ "commits": [ { "message": "Prevent TransportReplicationAction to route request based on stale local routing table\n\nCloses #16274\nCloses #12573\nCloses #12574" } ], "files": [ { "diff": "@@ -19,6 +19,7 @@\n \n package org.elasticsearch.action.support.replication;\n \n+import org.elasticsearch.Version;\n import org.elasticsearch.action.ActionRequest;\n import org.elasticsearch.action.ActionRequestValidationException;\n import org.elasticsearch.action.IndicesRequest;\n@@ -58,6 +59,8 @@ public class ReplicationRequest<T extends ReplicationRequest<T>> extends ChildTa\n private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT;\n private volatile boolean canHaveDuplicates = false;\n \n+ private long routedBasedOnClusterVersion = 0;\n+\n public ReplicationRequest() {\n \n }\n@@ -170,6 +173,20 @@ public final T consistencyLevel(WriteConsistencyLevel consistencyLevel) {\n return (T) this;\n }\n \n+ /**\n+ * Sets the minimum version of the cluster state that is required on the next node before we redirect to another primary.\n+ * Used to prevent redirect loops, see also {@link TransportReplicationAction.ReroutePhase#doRun()}\n+ */\n+ @SuppressWarnings(\"unchecked\")\n+ T routedBasedOnClusterVersion(long routedBasedOnClusterVersion) {\n+ this.routedBasedOnClusterVersion = routedBasedOnClusterVersion;\n+ return (T) this;\n+ }\n+\n+ long routedBasedOnClusterVersion() {\n+ return routedBasedOnClusterVersion;\n+ }\n+\n @Override\n public ActionRequestValidationException validate() {\n ActionRequestValidationException validationException = null;\n@@ -192,6 +209,9 @@ public void readFrom(StreamInput in) throws IOException {\n index = in.readString();\n canHaveDuplicates = in.readBoolean();\n // no need to serialize threaded* parameters, since they only matter locally\n+ if (in.getVersion().onOrAfter(Version.V_2_4_0)) {\n+ routedBasedOnClusterVersion = in.readVLong();\n+ }\n }\n \n @Override\n@@ -207,6 +227,9 @@ public void writeTo(StreamOutput out) throws IOException {\n timeout.writeTo(out);\n out.writeString(index);\n out.writeBoolean(canHaveDuplicates);\n+ if (out.getVersion().onOrAfter(Version.V_2_4_0)) {\n+ out.writeVLong(routedBasedOnClusterVersion);\n+ }\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java", "status": "modified" }, { "diff": "@@ -479,6 +479,15 @@ protected void doRun() {\n }\n performAction(node, transportPrimaryAction, true);\n } else {\n+ if (state.version() < request.routedBasedOnClusterVersion()) {\n+ logger.trace(\"failed to find primary [{}] for request [{}] despite sender thinking it would be here. 
Local cluster state version [{}]] is older than on sending node (version [{}]), scheduling a retry...\", request.shardId(), request, state.version(), request.routedBasedOnClusterVersion());\n+ retryBecauseUnavailable(request.shardId(), \"failed to find primary as current cluster state with version [\" + state.version() + \"] is stale (expected at least [\" + request.routedBasedOnClusterVersion() + \"]\");\n+ return;\n+ } else {\n+ // chasing the node with the active primary for a second hop requires that we are at least up-to-date with the current cluster state version\n+ // this prevents redirect loops between two nodes when a primary was relocated and the relocation target is not aware that it is the active primary shard already.\n+ request.routedBasedOnClusterVersion(state.version());\n+ }\n if (logger.isTraceEnabled()) {\n logger.trace(\"send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}]\", actionName, request.shardId(), request, state.version(), primary.currentNodeId());\n }", "filename": "core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java", "status": "modified" }, { "diff": "@@ -38,6 +38,7 @@\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;\n import org.elasticsearch.cluster.metadata.MetaData;\n+import org.elasticsearch.cluster.node.DiscoveryNodes;\n import org.elasticsearch.cluster.routing.IndexShardRoutingTable;\n import org.elasticsearch.cluster.routing.ShardIterator;\n import org.elasticsearch.cluster.routing.ShardRouting;\n@@ -56,6 +57,7 @@\n import org.elasticsearch.index.shard.ShardId;\n import org.elasticsearch.index.shard.ShardNotFoundException;\n import org.elasticsearch.rest.RestStatus;\n+import org.elasticsearch.test.ESAllocationTestCase;\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.test.cluster.TestClusterService;\n import org.elasticsearch.test.transport.CapturingTransport;\n@@ -72,6 +74,7 @@\n \n import java.io.IOException;\n import java.util.ArrayList;\n+import java.util.Arrays;\n import java.util.HashMap;\n import java.util.HashSet;\n import java.util.List;\n@@ -217,6 +220,59 @@ public void testNotStartedPrimary() throws InterruptedException, ExecutionExcept\n assertIndexShardCounter(1);\n }\n \n+ /**\n+ * When relocating a primary shard, there is a cluster state update at the end of relocation where the active primary is switched from\n+ * the relocation source to the relocation target. If relocation source receives and processes this cluster state\n+ * before the relocation target, there is a time span where relocation source believes active primary to be on\n+ * relocation target and relocation target believes active primary to be on relocation source. 
This results in replication\n+ * requests being sent back and forth.\n+ *\n+ * This test checks that replication request is not routed back from relocation target to relocation source in case of\n+ * stale index routing table on relocation target.\n+ */\n+ @Test\n+ public void testNoRerouteOnStaleClusterState() throws InterruptedException, ExecutionException {\n+ final String index = \"test\";\n+ final ShardId shardId = new ShardId(index, 0);\n+ ClusterState state = state(index, true, ShardRoutingState.RELOCATING);\n+ IndexShardRoutingTable shardRoutingTable = state.getRoutingTable().shardRoutingTable(shardId.getIndex(), shardId.id());\n+ String relocationTargetNode = shardRoutingTable.primaryShard().relocatingNodeId();\n+ state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(relocationTargetNode)).build();\n+ clusterService.setState(state);\n+ logger.debug(\"--> relocation ongoing state:\\n{}\", clusterService.state().prettyPrint());\n+\n+ Request request = new Request(shardId).timeout(\"1ms\").routedBasedOnClusterVersion(clusterService.state().version() + 1);\n+ PlainActionFuture<Response> listener = new PlainActionFuture<>();\n+ TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(null, request, listener);\n+ reroutePhase.run();\n+ assertListenerThrows(\"cluster state too old didn't cause a timeout\", listener, UnavailableShardsException.class);\n+\n+ request = new Request(shardId).routedBasedOnClusterVersion(clusterService.state().version() + 1);\n+ listener = new PlainActionFuture<>();\n+ reroutePhase = action.new ReroutePhase(null, request, listener);\n+ reroutePhase.run();\n+ assertFalse(\"cluster state too old didn't cause a retry\", listener.isDone());\n+\n+ // finish relocation\n+ shardRoutingTable = clusterService.state().getRoutingTable().shardRoutingTable(shardId.getIndex(), shardId.id());\n+ ShardRouting relocationTarget = shardRoutingTable.shardsWithState(ShardRoutingState.INITIALIZING).get(0);\n+ AllocationService allocationService = ESAllocationTestCase.createAllocationService();\n+ RoutingAllocation.Result result = allocationService.applyStartedShards(state, Arrays.asList(relocationTarget));\n+ ClusterState updatedState = ClusterState.builder(clusterService.state()).routingResult(result).build();\n+\n+ clusterService.setState(updatedState);\n+ logger.debug(\"--> relocation complete state:\\n{}\", clusterService.state().prettyPrint());\n+\n+ shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());\n+ final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId();\n+ final List<CapturingTransport.CapturedRequest> capturedRequests =\n+ transport.capturedRequestsByTargetNode().get(primaryNodeId);\n+ assertThat(capturedRequests, notNullValue());\n+ assertThat(capturedRequests.size(), equalTo(1));\n+ assertThat(capturedRequests.get(0).action, equalTo(\"testAction[p]\"));\n+ assertIndexShardCounter(1);\n+ }\n+\n @Test\n public void testUnknownIndexOrShardOnReroute() throws InterruptedException {\n final String index = \"test\";", "filename": "core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java", "status": "modified" } ] }
{ "body": "<!--\nGitHub is reserved for bug reports and feature requests. The best place\nto ask a general question is at the Elastic Discourse forums at\nhttps://discuss.elastic.co. If you are in fact posting a bug report or\na feature request, please include one and only one of the below blocks\nin your new issue.\n-->\n\n<!--\nIf you are filing a bug report, please remove the below feature\nrequest block and provide responses for all of the below items.\n-->\n\n**Elasticsearch version**:\n2.3.3\n\n**JVM version**:\njava version \"1.7.0_101\"\nOpenJDK Runtime Environment (IcedTea 2.6.6) (7u101-2.6.6-0ubuntu0.14.04.1)\nOpenJDK 64-Bit Server VM (build 24.95-b01, mixed mode)\n\n**OS version**:\nUbuntu 14.04.4\n\n**Description of the problem including expected versus actual behavior**:\nI've done a percolation on my index and i've applied a term filter on percolate's `_id` field, but the response is not the expected.\n\nrequest\n\n```\nGET index/type/1/_percolate\n{\n \"filter\": {\n \"term\": {\n \"_id\": \"query_1\"\n }\n }\n}\n```\n\nexpected response\n\n```\n{\n \"took\": 41,\n \"_shards\": {\n \"total\": 5,\n \"successful\": 5,\n \"failed\": 0\n },\n \"total\": 1,\n \"matches\": [\n {\n \"_index\": \"index\",\n \"_id\": \"query_1\"\n }\n ]\n}\n```\n\nactual response\n\n```\n{\n \"took\": 14,\n \"_shards\": {\n \"total\": 5,\n \"successful\": 5,\n \"failed\": 0\n },\n \"total\": 0,\n \"matches\": []\n}\n```\n\n**Steps to reproduce**:\n1. Create an index\n2. Store a query in percolator\n3. And do a percolation with filter on _id\n", "comments": [ { "body": "Yes, for some reason the percolator filter doesn't support the `_id` lookup via `_uid`. You can work around this by filtering on `_uid` instead:\n\n```\n{\n \"filter\": {\n \"term\": {\n \"_uid\": \".percolator#query_1\"\n }\n }\n}\n```\n\nNote: percolation has changed completely in 5.0. Instead of the percolate API we now have the percolate query, which is just part of the query DSL. As such, the rest of the query DSL will work as expected.\n", "created_at": "2016-06-29T12:03:14Z" }, { "body": "Closed via #19210\n", "created_at": "2016-07-05T07:53:57Z" } ], "number": 19130, "title": "[BUG] Not expected behaviour with filter on percolate's _id" }
{ "body": "PR for #19130\n", "number": 19210, "review_comments": [], "title": "Set the SC and QPC type always to `.percolator` in percolate api" }
{ "commits": [ { "message": "percolator: set the SC and QPC type always to `.percolator` in percolate api\n\nCloses #19130" } ], "files": [ { "diff": "@@ -197,6 +197,11 @@ public PercolateShardResponse percolate(PercolateShardRequest request) {\n final PercolateContext context = new PercolateContext(\n request, searchShardTarget, indexShard, percolateIndexService, pageCacheRecycler, bigArrays, scriptService, aliasFilter, parseFieldMatcher\n );\n+ // Some queries (function_score query when for decay functions) rely on a SearchContext being set:\n+ // We switch types because this context needs to be in the context of the percolate queries in the shard and\n+ // not the in memory percolate doc\n+ String[] previousTypes = context.types();\n+ context.types(new String[]{TYPE_NAME});\n SearchContext.setCurrent(context);\n try {\n ParsedDocument parsedDocument = parseRequest(percolateIndexService, request, context, request.shardId().getIndex());\n@@ -253,6 +258,7 @@ public PercolateShardResponse percolate(PercolateShardRequest request) {\n percolatorIndex.prepare(context, parsedDocument);\n return action.doPercolate(request, context, isNested);\n } finally {\n+ context.types(previousTypes);\n SearchContext.removeCurrent();\n context.close();\n shardPercolateService.postPercolate(System.nanoTime() - startTime);\n@@ -272,11 +278,6 @@ private ParsedDocument parseRequest(IndexService documentIndexService, Percolate\n ParsedDocument doc = null;\n XContentParser parser = null;\n \n- // Some queries (function_score query when for decay functions) rely on a SearchContext being set:\n- // We switch types because this context needs to be in the context of the percolate queries in the shard and\n- // not the in memory percolate doc\n- String[] previousTypes = context.types();\n- context.types(new String[]{TYPE_NAME});\n try {\n parser = XContentFactory.xContent(source).createParser(source);\n String currentFieldName = null;\n@@ -378,7 +379,6 @@ private ParsedDocument parseRequest(IndexService documentIndexService, Percolate\n } catch (Throwable e) {\n throw new ElasticsearchParseException(\"failed to parse request\", e);\n } finally {\n- context.types(previousTypes);\n if (parser != null) {\n parser.close();\n }", "filename": "core/src/main/java/org/elasticsearch/percolator/PercolatorService.java", "status": "modified" }, { "diff": "@@ -2081,5 +2081,20 @@ public void testWithEmptyFilter() throws Exception {\n assertNoFailures(response1);\n }\n \n+ @Test\n+ public void testWithFilterOnIdField() throws Exception {\n+ createIndex(\"test\");\n+ client().prepareIndex(\"test\", PercolatorService.TYPE_NAME, \"1\")\n+ .setSource(jsonBuilder().startObject().field(\"query\", matchAllQuery()))\n+ .setRefresh(true)\n+ .get();\n+ PercolateResponse response1 = client().preparePercolate()\n+ .setIndices(\"test\").setDocumentType(\"type\")\n+ .setPercolateQuery(termQuery(\"_id\", \"1\"))\n+ .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(\"{}\"))\n+ .get();\n+ assertMatchCount(response1, 1L);\n+ }\n+\n }\n ", "filename": "core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java", "status": "modified" } ] }
{ "body": "Apparently we published the REST Client on maven central at https://repo1.maven.org/maven2/org/elasticsearch/rest/5.0.0-alpha4/\n\nWhich means that the artifact coordinates are `org.elasticsearch:rest:5.0.0-alpha4`.\nBut it should be `org.elasticsearch.client:rest:5.0.0-alpha4`.\n", "comments": [ { "body": "Also reported at https://discuss.elastic.co/t/restclient-in-5-0-0-alpha4/54509\n", "created_at": "2016-07-01T08:04:26Z" }, { "body": "This is fixed in our gradle configuration, also the artifacts for 5.0.0-alpha4 were added in the proper locations and the groupId was updated in their corresponding poms.\n", "created_at": "2016-07-01T12:29:50Z" } ], "number": 19205, "title": "REST client is not released with the right groupId" }
{ "body": "Build: set group for client and sniffer, disable publishing for client-test\n\nCloses #19205\n", "number": 19206, "review_comments": [], "title": "Build: set group for client and sniffer, disable publishing for client-test" }
{ "commits": [ { "message": "Build: set group for client and sniffer, disable publishing for client-test\n\nCloses #19205" } ], "files": [ { "diff": "@@ -26,6 +26,8 @@ apply plugin: 'ru.vyarus.animalsniffer'\n targetCompatibility = JavaVersion.VERSION_1_7\n sourceCompatibility = JavaVersion.VERSION_1_7\n \n+group = 'org.elasticsearch.client'\n+\n dependencies {\n compile \"org.apache.httpcomponents:httpclient:${versions.httpclient}\"\n compile \"org.apache.httpcomponents:httpcore:${versions.httpcore}\"", "filename": "client/rest/build.gradle", "status": "modified" }, { "diff": "@@ -26,6 +26,8 @@ apply plugin: 'ru.vyarus.animalsniffer'\n targetCompatibility = JavaVersion.VERSION_1_7\n sourceCompatibility = JavaVersion.VERSION_1_7\n \n+group = 'org.elasticsearch.client'\n+\n dependencies {\n compile \"org.elasticsearch.client:rest:${version}\"\n compile \"org.apache.httpcomponents:httpclient:${versions.httpclient}\"", "filename": "client/sniffer/build.gradle", "status": "modified" }, { "diff": "@@ -26,6 +26,9 @@ apply plugin: 'ru.vyarus.animalsniffer'\n targetCompatibility = JavaVersion.VERSION_1_7\n sourceCompatibility = JavaVersion.VERSION_1_7\n \n+install.enabled = false\n+uploadArchives.enabled = false\n+\n dependencies {\n compile \"com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}\"\n compile \"junit:junit:${versions.junit}\"", "filename": "client/test/build.gradle", "status": "modified" } ] }
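Because Maven coordinates map directly onto repository paths, the corrected group for the REST client also changes where the artifact lives on Maven Central. A hedged check, assuming the 5.0.0-alpha4 artifacts were republished under `org.elasticsearch.client` as the comments above state:

```sh
# Hypothetical check of the corrected coordinates org.elasticsearch.client:rest:5.0.0-alpha4,
# which should resolve under .../org/elasticsearch/client/rest/... rather than .../org/elasticsearch/rest/...
curl -I 'https://repo1.maven.org/maven2/org/elasticsearch/client/rest/5.0.0-alpha4/rest-5.0.0-alpha4.pom'
```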
{ "body": "Using 2.2.0, I am unable to delete by query for data that has been indexed using `external_gte` [version type](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types). Here's the error that I'm receiving:\n\n```\n[ec2-user@es1-dev ~]$ curl -XDELETE 'http://es1:9200/testindex/_query?q=repo:testing'\n{\"error\":{\"root_cause\":[{\"type\":\"action_request_validation_exception\",\"reason\":\"Validation Failed: 1: illegal version value [0] for version type [INTERNAL];2: illegal version value [0] for version type [INTERNAL];3: illegal version value [0] for version type [INTERNAL];4: illegal version value [0] for version type [INTERNAL];5: illegal version value [0] for version type [INTERNAL];6: illegal version value [0] for version type [INTERNAL];7: illegal version value [0] for version type [INTERNAL];8: illegal version value [0] for version type [INTERNAL];9: illegal version value [0] for version type [INTERNAL];10: illegal version value [0] for version type [INTERNAL];\"}],\"type\":\"action_request_validation_exception\",\"reason\":\"Validation Failed: 1: illegal version value [0] for version type [INTERNAL];2: illegal version value [0] for version type [INTERNAL];3: illegal version value [0] for version type [INTERNAL];4: illegal version value [0] for version type [INTERNAL];5: illegal version value [0] for version type [INTERNAL];6: illegal version value [0] for version type [INTERNAL];7: illegal version value [0] for version type [INTERNAL];8: illegal version value [0] for version type [INTERNAL];9: illegal version value [0] for version type [INTERNAL];10: illegal version value [0] for version type [INTERNAL];\"},\"status\":400}\n```\n\nThe delete by query succeeds for an index that doesn't use `external_gte`.\n\nthanks!\n", "comments": [ { "body": "@bleskes what do you think?\n", "created_at": "2016-02-14T00:13:23Z" }, { "body": "This indeed an unfortunate case where internal and external versioning do not mix well. Internal version mean that ES is the source of truth for changes - it is incremented with every change in ES and starts with 1. External versioning assumes that some other system tracks document changes (including deletes). Originally 0 was an invalid value for external versioning but it wasn't enforced in code. When we fixed the latter people complained and we have changed semantics to allow 0 as a valid external value (see https://github.com/elastic/elasticsearch/issues/5662). Now you can insert a value that's valid as an external version but is illegal for internal.\n\nThe delete by query plugin uses internal versioning to make sure the documents it deletes didn't change during it's operations. However, since the documents were indexed using the external versioning, their version is 0 which is illegal. \n\nCan you tell us a bit more about your setup? Why are you using the delete by query plugin where you have some external source of truth? I would presume you would delete documents there first and have those propagated to ES as deletes with an external version?\n", "created_at": "2016-02-15T14:17:36Z" }, { "body": "We receive our data from a third-party that supplies versions, starting with 0. For one of our indexes, we only care about the most recent version of a given resource, but need to be able to support reloading old data (mapping changes, etc). In order to ensure we're only keeping the latest (regardless of order received) we've gone with indexing using `external_gte`. 
Our process simply ignores the VersionConflictException that gets returned when attempting to add an older version. It has worked rather well for us.\n\nPeriodically, we'll need to delete data, for a variety of reasons. These are one-off deletes, usually related to expiring license agreements and such, and are separate from any versioning scheme. Historically we've just manually done a delete by query to handle these cases, which has served us well until recently.\n", "created_at": "2016-02-15T15:27:01Z" }, { "body": "I'm using internal indexing and hitting this on index...\n\n illegal version value [0] for version type [INTERNAL];\n", "created_at": "2016-09-29T19:37:17Z" }, { "body": "@niemyjski as we discussed in another issue, your issue is different than this one.\n\n@natelapp thanks for the update. The problem is that currently doesn't align with the main use case for external versioning, where some external source owns all changes to the documents, including deletes. I haven't come up with a clean way of allowing you to do what you need plus making other use cases work without surprises. As a workaround for now, I think the easiest for you is to always +1 the version you get from your data source (to allow a delete by query operation).\n", "created_at": "2016-10-03T11:35:55Z" }, { "body": "@natelapp thanks for reporting this issue. The issue has been fixed in the upcoming 6.7 and 7.0 versions and I will therefore close this issue.", "created_at": "2019-03-26T13:44:22Z" } ], "number": 16654, "title": "ES 2.2.0 delete by query plugin fails for data with external versioning" }
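The workaround proposed by the maintainer above — always adding one to the version supplied by the external source, so that no document is ever stored with version 0 — can be sketched as follows. The document type, id, and field are illustrative; only the index name and the delete-by-query call come from the report:

```sh
# Sketch of the "+1 the external version" workaround: the source system reports version 0,
# so the document is indexed with external version 1 and internal versioning never sees a 0.
curl -XPUT 'http://localhost:9200/testindex/doc/1?version=1&version_type=external_gte' -d '{
  "repo": "testing"
}'

# The 2.x delete-by-query from the report should then no longer fail validation on version 0.
curl -XDELETE 'http://localhost:9200/testindex/_query?q=repo:testing'
```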
{ "body": "Update-By-Query and Delete-By-Query use internal versioning to update/delete documents. But documents can have a version number equal to zero using the external versioning... making the UBQ/DBQ request fail because zero is not a valid version number and they only support internal versioning for now. Sequence numbers might help to solve this issue in the future.\n\nRelated to #16654 and  #18750\n", "number": 19180, "review_comments": [ { "body": "You say update-by-query but this is the delete-by-query doc? \n", "created_at": "2016-06-30T13:26:45Z" }, { "body": "Space after internal\n", "created_at": "2016-06-30T13:27:11Z" }, { "body": "That was intended to check if the reviewer did a good job. Or it might just be a copy/paste error... :)\n", "created_at": "2016-06-30T13:48:20Z" } ], "title": "[Doc] Document Update/Delete-By-Query with version number zero" }
{ "commits": [ { "message": "Document Update/Delete-By-Query with version number zero\n\nUpdate-By-Query and Delete-By-Query use internal versioning to update/delete documents. But documents can have a version number equal to zero using the external versioning... making the UBQ/DBQ request fail because zero is not a valid version number and they only support internal versioning for now. Sequence numbers might help to solve this issue in the future." } ], "files": [ { "diff": "@@ -54,6 +54,10 @@ conflict if the document changes between the time when the snapshot was taken\n and when the delete request is processed. When the versions match the document\n is deleted.\n \n+NOTE: Since `internal` versioning does not support the value 0 as a valid\n+version number, documents with version equal to zero cannot be deleted using\n+`_delete_by_query` and will fail the request.\n+\n During the `_delete_by_query` execution, multiple search requests are sequentially\n executed in order to find all the matching documents to delete. Every time a batch\n of documents is found, a corresponding bulk request is executed to delete all", "filename": "docs/reference/docs/delete-by-query.asciidoc", "status": "modified" }, { "diff": "@@ -119,6 +119,14 @@ indexed and the new version number used. If the value provided is less\n than or equal to the stored document's version number, a version\n conflict will occur and the index operation will fail.\n \n+WARNING: External versioning supports the value 0 as a valid version number.\n+This allows the version to be in sync with an external versioning system\n+where version numbers start from zero instead of one. It has the side effect\n+that documents with version number equal to zero cannot neither be updated\n+using the <<docs-update-by-query,Update-By-Query API>> nor be deleted\n+using the <<docs-delete-by-query,Delete By Query API>> as long as their\n+version number is equal to zero.\n+\n A nice side effect is that there is no need to maintain strict ordering\n of async indexing operations executed as a result of changes to a source\n database, as long as version numbers from the source database are used.", "filename": "docs/reference/docs/index_.asciidoc", "status": "modified" }, { "diff": "@@ -46,6 +46,10 @@ conflict if the document changes between the time when the snapshot was taken\n and when the index request is processed. When the versions match the document\n is updated and the version number is incremented.\n \n+NOTE: Since `internal` versioning does not support the value 0 as a valid\n+version number, documents with version equal to zero cannot be updated using\n+`_update_by_query` and will fail the request.\n+\n All update and query failures cause the `_update_by_query` to abort and are\n returned in the `failures` of the response. The updates that have been\n performed still stick. 
In other words, the process is not rolled back, only", "filename": "docs/reference/docs/update-by-query.asciidoc", "status": "modified" }, { "diff": "@@ -0,0 +1,29 @@\n+---\n+\"delete_by_query fails to delete documents with version number equal to zero\":\n+ - do:\n+ index:\n+ index: index1\n+ type: type1\n+ id: 1\n+ version: 0 # Starting version is zero\n+ version_type: external\n+ body: {\"delete\": 0}\n+ - do:\n+ indices.refresh: {}\n+\n+ # Delete by query uses internal versioning and will fail here\n+ # because zero is not allowed as a valid version number\n+ - do:\n+ catch: /illegal version value \\[0\\] for version type \\[INTERNAL\\]./\n+ delete_by_query:\n+ index: index1\n+ refresh: true\n+ body:\n+ query:\n+ match_all: {}\n+ - do:\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - match: {_version: 0}", "filename": "modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/40_versioning.yaml", "status": "added" }, { "diff": "@@ -21,3 +21,30 @@\n type: test\n id: 1\n - match: {_version: 2}\n+\n+---\n+\"update_by_query fails to update documents with version number equal to zero\":\n+ - do:\n+ index:\n+ index: index1\n+ type: type1\n+ id: 1\n+ version: 0 # Starting version is zero\n+ version_type: external\n+ body: {\"update\": 0}\n+ - do:\n+ indices.refresh: {}\n+\n+ # Update by query uses internal versioning and will fail here\n+ # because zero is not allowed as a valid version number\n+ - do:\n+ catch: /illegal version value \\[0\\] for version type \\[INTERNAL\\]./\n+ update_by_query:\n+ index: index1\n+ refresh: true\n+ - do:\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - match: {_version: 0}", "filename": "modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/40_versioning.yaml", "status": "modified" } ] }
{ "body": "Note: PR against 2.4 branch, master will follow.\n\nS3 repository needs a special permission to work because when no region is explicitly set the AWS SDK will load a JSON file that contain all Amazon's endpoints and will map the content of this file to plain old Java objects. To do that, it uses Jackson's databinding and reflection that require the `java.lang.reflect.ReflectPermission \"suppressAccessChecks\"` permission.\n\nThis issue only occur if no region is set in the repository setting and in the elasticsearch.yml file.\n\ncloses #18539\n", "comments": [ { "body": "It looks good to me.\n", "created_at": "2016-06-28T10:17:42Z" }, { "body": "That being said, I wonder if we should better ourself fall back setting to a default region than giving permission to the SM\n", "created_at": "2016-06-28T10:18:56Z" }, { "body": "We could fall back to default US region, but I think that some users also use this plugin with their own custom endpoint and enforcing a AWS default region here might be problematic?\n", "created_at": "2016-06-28T12:22:24Z" }, { "body": "IIRC setting the endpoint has precedence.\n", "created_at": "2016-06-28T12:24:36Z" }, { "body": "I think defaulting to a region, whatever it is, is too trappy. I think that the current way endpoint & region settings are managed in the plugin is not fully coherent with the AWS SDK.\n\nFor example, this does not work:\n\n```\nPUT /_snapshot/my_s3_repository '{\n \"type\": \"s3\",\n \"settings\": {\n \"bucket\": \"cloud-aws-test\", \n \"region\": \"us-east\" \n }\n}'\n```\n\nbecause we set the default endpoint to `s3.amazonaws.com` for regions \"us-east\" and \"us-east-1\". \n\nI think we must review the way region override endpoints but for now I'm just fixing things so that it works. \n\nSo I'm +1 on adding the special permission for now.\n", "created_at": "2016-06-28T13:33:48Z" }, { "body": "Same comment as on #19128 \n\nMy objection is with the explanation: it makes it seem as if this is \"justified\", it is not. It is simply shitty code AWS code: they need to fix their access modifiers. Its not necessary.\n\nSorry but, if we explain it like we currently do, it makes it sound like they are doing nothing wrong, and nobody will ever fix it. The truth is you can submit a PR to AWS adding a missing `public` and the bug goes away.\n", "created_at": "2016-06-28T16:45:38Z" }, { "body": "> Sorry but, if we explain it like we currently do, it makes it sound like they are doing nothing wrong, and nobody will ever fix it. The truth is you can submit a PR to AWS adding a missing public and the bug goes away.\n\n@tlrx can you open an issue with them to fix this?\n", "created_at": "2016-06-28T20:29:44Z" }, { "body": "I think its ok to give the permission for now, before pushing I just want the comment to be correct so we know its a fixable situation. They have fixed this problem before in another part of the code (their configuration uses the same serialization).\n", "created_at": "2016-06-28T20:35:46Z" }, { "body": "> My objection is with the explanation: it makes it seem as if this is \"justified\", it is not. It is simply shitty code AWS code: they need to fix their access modifiers. Its not necessary.\n\nI agree, my explanation is misleading, sorry. 
This is all my brain was able to produce in English after having spent so much time debugging ec2/s3 stuff.\n\nI pointed to Jackson's because the stacktrace in #18539 shows that `ClassUtil.checkAndFixAccess()` method throws the exception when it tries to call `setAccessible(true)` on a public constructor without even checking the modifiers first, which seemed strange to me. Since the AWS `Partitions` class looked good to me (I also suspected a missing `public` on ctor or setter... did I miss something?) I suspected that Jackson's should check the modifiers first like it does [in more recent versions](https://github.com/FasterXML/jackson-databind/blob/master/src/main/java/com/fasterxml/jackson/databind/util/ClassUtil.java#L864-L865).\n\nI stopped there, hoping that a more recent version of AWS SDK will use a more recent version of Jackon Databinding that has more checks and options to configure object bindings.\n\n> Sorry but, if we explain it like we currently do, it makes it sound like they are doing nothing wrong, and nobody will ever fix it. \n\nI agree, I updated my comment. Thanks for your feedback, please let me know if that's better now.\n\n> The truth is you can submit a PR to AWS adding a missing public and the bug goes away.\n\nThat was my first guess too but I think now that the issue can only be fixed with an update of the version of Jackson used by AWS SDK + a better configuration of Jackson's object mapper used by AWS SDK (like disabling MapperFeature.CAN_OVERRIDE_ACCESS_MODIFIERS / OVERRIDE_PUBLIC_ACCESS_MODIFIERS / ALLOW_FINAL_FIELDS_AS_MUTATORS). So many things just to load a JSON config file...\n\nI'll create an issue in the aws sdk java GitHub repository to track this.\n\nEdit: Finally found https://github.com/aws/aws-sdk-java/issues/528 and created https://github.com/aws/aws-sdk-java/issues/766 to track this\n", "created_at": "2016-06-30T08:37:59Z" }, { "body": "+1\n", "created_at": "2016-06-30T16:16:40Z" }, { "body": "Merged in ef1bbe46c11423f1e400d643059d4ab52d1b88be\n", "created_at": "2016-07-01T07:55:47Z" } ], "number": 19121, "title": "Add missing permission for S3 repository" }
{ "body": "Note: PR against master branch, 2.4 is #19121\n\nS3 repository needs a special permission to work because when no region is explicitly set the AWS SDK will load a JSON file that contains all Amazon's endpoints and will map the content of this file to plain old Java objects. To do that, it uses Jackson's databinding and reflection that require the java.lang.reflect.ReflectPermission \"suppressAccessChecks\" permission.\n\nThis issue only occurs if no region is set in the repository setting and in the elasticsearch.yml file.\n\ncloses #18539\n", "number": 19128, "review_comments": [], "title": "Add missing permission to repository-s3" }
{ "commits": [ { "message": "Add missing permission to repository-s3\n\nRepository-S3 needs a special permission because of problems in AmazonS3Client: when no region is set on a AmazonS3Client instance, the AWS SDK loads all known partitions from a JSON file and uses a Jackson's ObjectMapper for that: this one, in version 2.5.3 with the default binding options, tries to suppress access checks of ctor/field/method and thus requires this special permission. AWS must be fixed to uses Jackson correctly and have the correct modifiers on binded classes.\n\nThis must be fixed in aws sdk (see https://github.com/aws/aws-sdk-java/issues/766) but in the meanwhile we have no choice.\n\ncloses #18539" } ], "files": [ { "diff": "@@ -26,6 +26,7 @@\n import com.amazonaws.services.s3.model.ObjectMetadata;\n import com.amazonaws.services.s3.model.S3Object;\n import com.amazonaws.services.s3.model.S3ObjectSummary;\n+import org.elasticsearch.SpecialPermission;\n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.blobstore.BlobMetaData;\n import org.elasticsearch.common.blobstore.BlobPath;\n@@ -40,6 +41,9 @@\n import java.io.IOException;\n import java.io.InputStream;\n import java.io.OutputStream;\n+import java.security.AccessController;\n+import java.security.PrivilegedActionException;\n+import java.security.PrivilegedExceptionAction;\n import java.util.Map;\n \n /**\n@@ -60,8 +64,14 @@ public S3BlobContainer(BlobPath path, S3BlobStore blobStore) {\n @Override\n public boolean blobExists(String blobName) {\n try {\n- blobStore.client().getObjectMetadata(blobStore.bucket(), buildKey(blobName));\n- return true;\n+ return doPrivileged(() -> {\n+ try {\n+ blobStore.client().getObjectMetadata(blobStore.bucket(), buildKey(blobName));\n+ return true;\n+ } catch (AmazonS3Exception e) {\n+ return false;\n+ }\n+ });\n } catch (AmazonS3Exception e) {\n return false;\n } catch (Throwable e) {\n@@ -180,4 +190,19 @@ protected String buildKey(String blobName) {\n return keyPath + blobName;\n }\n \n+ /**\n+ * + * Executes a {@link PrivilegedExceptionAction} with privileges enabled.\n+ * +\n+ */\n+ <T> T doPrivileged(PrivilegedExceptionAction<T> operation) throws IOException {\n+ SecurityManager sm = System.getSecurityManager();\n+ if (sm != null) {\n+ sm.checkPermission(new SpecialPermission());\n+ }\n+ try {\n+ return AccessController.doPrivileged(operation);\n+ } catch (PrivilegedActionException e) {\n+ throw (IOException) e.getException();\n+ }\n+ }\n }", "filename": "plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/S3BlobContainer.java", "status": "modified" }, { "diff": "@@ -22,4 +22,16 @@ grant {\n // TODO: get these fixed in aws sdk\n permission java.lang.RuntimePermission \"accessDeclaredMembers\";\n permission java.lang.RuntimePermission \"getClassLoader\";\n+ // Needed because of problems in AmazonS3Client:\n+ // When no region is set on a AmazonS3Client instance, the\n+ // AWS SDK loads all known partitions from a JSON file and\n+ // uses a Jackson's ObjectMapper for that: this one, in\n+ // version 2.5.3 with the default binding options, tries\n+ // to suppress access checks of ctor/field/method and thus\n+ // requires this special permission. 
AWS must be fixed to\n+ // uses Jackson correctly and have the correct modifiers\n+ // on binded classes.\n+ // TODO: get these fixed in aws sdk\n+ // See https://github.com/aws/aws-sdk-java/issues/766\n+ permission java.lang.reflect.ReflectPermission \"suppressAccessChecks\";\n };", "filename": "plugins/repository-s3/src/main/plugin-metadata/plugin-security.policy", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: 2.3.2\n\n**JVM version**: 25.91-b14\n\n**OS version**: \n\n**Description of the problem including expected versus actual behavior**:\n\n**Steps to reproduce**:\n\n``` sh\ncurl -XPUT 'http://localhost:9200/_snapshot/test-snapshot' -d '{\n \"type\": \"s3\",\n \"settings\": {\n \"bucket\": \"test-snapshot\",\n \"access_key\": \"XXXXX\", \n \"secret_key\": \"XXXX\" }\n}'\n```\n\n**Provide logs (if relevant)**:\n\n```\n[2016-05-24 00:12:38,425][WARN ][repositories ] [54.210.62.226 (m3.xlarge) - i-4e4e7ac9] [awsmt-deviceprofile-es-snapshot] failed to verify repository\nBlobStoreException[failed to check if blob exists]; nested: AmazonClientException[Error while loading partitions file from com/amazonaws/partitions/endpoints.json]; nested: JsonMappingException[Can not access public com.amazonaws.partitions.model.Partitions(java.lang.String,java.util.List) (from class com.amazonaws.partitions.model.Partitions; failed to set access: access denied (\"java.lang.reflect.ReflectPermission\" \"suppressAccessChecks\")]; nested: IllegalArgumentException[Can not access public com.amazonaws.partitions.model.Partitions(java.lang.String,java.util.List) (from class com.amazonaws.partitions.model.Partitions; failed to set access: access denied (\"java.lang.reflect.ReflectPermission\" \"suppressAccessChecks\")];\n at org.elasticsearch.cloud.aws.blobstore.S3BlobContainer.blobExists(S3BlobContainer.java:65)\n at org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository.verify(BlobStoreIndexShardRepository.java:240)\n at org.elasticsearch.repositories.VerifyNodeRepositoryAction.doVerify(VerifyNodeRepositoryAction.java:121)\n at org.elasticsearch.repositories.VerifyNodeRepositoryAction.verify(VerifyNodeRepositoryAction.java:86)\n at org.elasticsearch.repositories.RepositoriesService.verifyRepository(RepositoriesService.java:214)\n at org.elasticsearch.repositories.RepositoriesService$VerifyingRegisterRepositoryListener.onResponse(RepositoriesService.java:436)\n at org.elasticsearch.repositories.RepositoriesService$VerifyingRegisterRepositoryListener.onResponse(RepositoriesService.java:421)\n at org.elasticsearch.cluster.AckedClusterStateUpdateTask.onAllNodesAcked(AckedClusterStateUpdateTask.java:63)\n at org.elasticsearch.cluster.service.InternalClusterService$SafeAckedClusterStateTaskListener.onAllNodesAcked(InternalClusterService.java:733)\n at org.elasticsearch.cluster.service.InternalClusterService$AckCountDownListener.onNodeAck(InternalClusterService.java:1013)\n at org.elasticsearch.cluster.service.InternalClusterService$DelegetingAckListener.onNodeAck(InternalClusterService.java:952)\n at org.elasticsearch.cluster.service.InternalClusterService.runTasksForExecutor(InternalClusterService.java:637)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:772)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:231)\nat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: com.amazonaws.AmazonClientException: Error while loading partitions file from com/amazonaws/partitions/endpoints.json\n at com.amazonaws.partitions.PartitionsLoader.loadPartitionFromStream(PartitionsLoader.java:99)\n at 
com.amazonaws.partitions.PartitionsLoader.build(PartitionsLoader.java:88)\n at com.amazonaws.regions.RegionMetadataFactory.create(RegionMetadataFactory.java:30)\n at com.amazonaws.regions.RegionUtils.initialize(RegionUtils.java:66)\n at com.amazonaws.regions.RegionUtils.getRegionMetadata(RegionUtils.java:54)\n at com.amazonaws.regions.RegionUtils.getRegion(RegionUtils.java:104)\n at com.amazonaws.services.s3.AmazonS3Client.resolveServiceEndpoint(AmazonS3Client.java:4195)\n at com.amazonaws.services.s3.AmazonS3Client.getObjectMetadata(AmazonS3Client.java:1006)\n at com.amazonaws.services.s3.AmazonS3Client.getObjectMetadata(AmazonS3Client.java:991)\n at org.elasticsearch.cloud.aws.blobstore.S3BlobContainer.blobExists(S3BlobContainer.java:60)\n ... 17 more\nCaused by: com.fasterxml.jackson.databind.JsonMappingException: Can not access public com.amazonaws.partitions.model.Partitions(java.lang.String,java.util.List) (from class com.amazonaws.partitions.model.Partitions; failed to set access: access denied (\"java.lang.reflect.ReflectPermission\" \"suppressAccessChecks\")\n at com.fasterxml.jackson.databind.deser.DeserializerCache._createAndCache2(DeserializerCache.java:269)\n at com.fasterxml.jackson.databind.deser.DeserializerCache._createAndCacheValueDeserializer(DeserializerCache.java:244)\n at com.fasterxml.jackson.databind.deser.DeserializerCache.findValueDeserializer(DeserializerCache.java:142)\n at com.fasterxml.jackson.databind.DeserializationContext.findRootValueDeserializer(DeserializationContext.java:461)\n at com.fasterxml.jackson.databind.ObjectMapper._findRootDeserializer(ObjectMapper.java:3833)\n at com.fasterxml.jackson.databind.ObjectMapper._readMapAndClose(ObjectMapper.java:3727)\n at com.fasterxml.jackson.databind.ObjectMapper.readValue(ObjectMapper.java:2794)\n at com.amazonaws.partitions.PartitionsLoader.loadPartitionFromStream(PartitionsLoader.java:96)\n ... 
26 more\nCaused by: java.lang.IllegalArgumentException: Can not access public com.amazonaws.partitions.model.Partitions(java.lang.String,java.util.List) (from class com.amazonaws.partitions.model.Partitions; failed to set access: access denied (\"java.lang.reflect.ReflectPermission\" \"suppressAccessChecks\")\n at com.fasterxml.jackson.databind.util.ClassUtil.checkAndFixAccess(ClassUtil.java:513)\n at com.fasterxml.jackson.databind.deser.impl.CreatorCollector._fixAccess(CreatorCollector.java:280)\n at com.fasterxml.jackson.databind.deser.impl.CreatorCollector.verifyNonDup(CreatorCollector.java:327)\n at com.fasterxml.jackson.databind.deser.impl.CreatorCollector.addPropertyCreator(CreatorCollector.java:184)\n at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory._addDeserializerConstructors(BasicDeserializerFactory.java:493)\n at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory._constructDefaultValueInstantiator(BasicDeserializerFactory.java:324)\n at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory.findValueInstantiator(BasicDeserializerFactory.java:254)\n at com.fasterxml.jackson.databind.deser.BeanDeserializerFactory.buildBeanDeserializer(BeanDeserializerFactory.java:222)\n at com.fasterxml.jackson.databind.deser.BeanDeserializerFactory.createBeanDeserializer(BeanDeserializerFactory.java:142)\n at com.fasterxml.jackson.databind.deser.DeserializerCache._createDeserializer2(DeserializerCache.java:403)\n at com.fasterxml.jackson.databind.deser.DeserializerCache._createDeserializer(DeserializerCache.java:352)\n at com.fasterxml.jackson.databind.deser.DeserializerCache._createAndCache2(DeserializerCache.java:264)\n ... 33 more\n```\n", "comments": [ { "body": "@dadoonet could you take a look please?\n", "created_at": "2016-05-24T09:36:50Z" }, { "body": "Is this blocking the release of v2.3.4?\n", "created_at": "2016-06-22T17:36:38Z" }, { "body": "@yamap77 Did you set any specific metadata for the S3 bucket: `test-snapshot`?\nCould you also try to explicitly set a region like `\"region\": \"us-west\"`?\n", "created_at": "2016-06-23T20:45:36Z" }, { "body": "Hi,\n\nI had fixed this issue. It turned out that I have to disable the security manager. After I disable it, I can sent data to s3 now.\n", "created_at": "2016-06-23T21:03:52Z" }, { "body": "I'm reopening it. You should never disable the security manager. We need to understand what is happening.\n\nI'll try to reproduce but if you have any details which would help to understand I'd appreciate a lot!\n", "created_at": "2016-06-23T21:24:50Z" }, { "body": "Hi,\nCurious to know if this issue is resolved in 2.3.5? \n", "created_at": "2016-11-03T07:16:22Z" }, { "body": "@mahesh-maney I don't think so. It's marked as 2.4.0 and 5.0.0 and according to the commits it has not been back ported in 2.3 branch.\n", "created_at": "2016-11-03T07:31:24Z" }, { "body": "FWIW, I am hacking the 2.4 branch with the ultimate goal of creating a S3 plugin that uses KMS encryption. \r\n\r\nI can add a single line to` org.elasticsearch.cloud.aws.InternalAwsS3Service`\r\n\r\n` Region region1 = Region.getRegion(Regions.US_EAST_1);\r\n`\r\nand reproduce what I think is this issue. 
\r\n\r\n```\r\nCaused by: java.lang.IllegalArgumentException: Can not access public com.amazonaws.partitions.model.Partitions(java.lang.String,java.util.List) (from class com.amazonaws.partitions.model.Partitions; failed to set access: access denied (\"java.lang.reflect.ReflectPermission\" \"suppressAccessChecks\")\r\n\tat com.fasterxml.jackson.databind.util.ClassUtil.checkAndFixAccess(ClassUtil.java:505)\r\n\tat com.fasterxml.jackson.databind.deser.impl.CreatorCollector._fixAccess(CreatorCollector.java:271)\r\n\r\n```\r\n\r\nI did verify that the patch mentioned here is in code I am working with. \r\n\r\nIf anyone has some ideas for a work around I'd appreciate it. (Also, apologies if this is wrong place for this kind of info but I thought it was relevant.) \r\n\r\n", "created_at": "2017-04-28T11:19:41Z" } ], "number": 18539, "title": "Jackson databind Exception when creating repository/sending snapshot to s3" }
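Until a release carries the permission added above, the failure can be sidestepped by not letting the SDK resolve the region on its own: registering the repository with an explicit region (the maintainer suggests trying one in the thread) avoids the code path that loads the partitions JSON file. A hedged sketch reusing the placeholders from the report:

```sh
# Workaround sketch: give the S3 repository an explicit region so the AWS SDK does not need to
# load com/amazonaws/partitions/endpoints.json (the call that hits the reflection permission).
# Bucket, keys, and region are placeholders; "region": "us-west" follows the suggestion in the thread.
curl -XPUT 'http://localhost:9200/_snapshot/test-snapshot' -d '{
  "type": "s3",
  "settings": {
    "bucket": "test-snapshot",
    "region": "us-west",
    "access_key": "XXXXX",
    "secret_key": "XXXX"
  }
}'
```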
{ "body": "Note: PR against master branch, 2.4 is #19121\n\nS3 repository needs a special permission to work because when no region is explicitly set the AWS SDK will load a JSON file that contains all Amazon's endpoints and will map the content of this file to plain old Java objects. To do that, it uses Jackson's databinding and reflection that require the java.lang.reflect.ReflectPermission \"suppressAccessChecks\" permission.\n\nThis issue only occurs if no region is set in the repository setting and in the elasticsearch.yml file.\n\ncloses #18539\n", "number": 19128, "review_comments": [], "title": "Add missing permission to repository-s3" }
{ "commits": [ { "message": "Add missing permission to repository-s3\n\nRepository-S3 needs a special permission because of problems in AmazonS3Client: when no region is set on a AmazonS3Client instance, the AWS SDK loads all known partitions from a JSON file and uses a Jackson's ObjectMapper for that: this one, in version 2.5.3 with the default binding options, tries to suppress access checks of ctor/field/method and thus requires this special permission. AWS must be fixed to uses Jackson correctly and have the correct modifiers on binded classes.\n\nThis must be fixed in aws sdk (see https://github.com/aws/aws-sdk-java/issues/766) but in the meanwhile we have no choice.\n\ncloses #18539" } ], "files": [ { "diff": "@@ -26,6 +26,7 @@\n import com.amazonaws.services.s3.model.ObjectMetadata;\n import com.amazonaws.services.s3.model.S3Object;\n import com.amazonaws.services.s3.model.S3ObjectSummary;\n+import org.elasticsearch.SpecialPermission;\n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.blobstore.BlobMetaData;\n import org.elasticsearch.common.blobstore.BlobPath;\n@@ -40,6 +41,9 @@\n import java.io.IOException;\n import java.io.InputStream;\n import java.io.OutputStream;\n+import java.security.AccessController;\n+import java.security.PrivilegedActionException;\n+import java.security.PrivilegedExceptionAction;\n import java.util.Map;\n \n /**\n@@ -60,8 +64,14 @@ public S3BlobContainer(BlobPath path, S3BlobStore blobStore) {\n @Override\n public boolean blobExists(String blobName) {\n try {\n- blobStore.client().getObjectMetadata(blobStore.bucket(), buildKey(blobName));\n- return true;\n+ return doPrivileged(() -> {\n+ try {\n+ blobStore.client().getObjectMetadata(blobStore.bucket(), buildKey(blobName));\n+ return true;\n+ } catch (AmazonS3Exception e) {\n+ return false;\n+ }\n+ });\n } catch (AmazonS3Exception e) {\n return false;\n } catch (Throwable e) {\n@@ -180,4 +190,19 @@ protected String buildKey(String blobName) {\n return keyPath + blobName;\n }\n \n+ /**\n+ * + * Executes a {@link PrivilegedExceptionAction} with privileges enabled.\n+ * +\n+ */\n+ <T> T doPrivileged(PrivilegedExceptionAction<T> operation) throws IOException {\n+ SecurityManager sm = System.getSecurityManager();\n+ if (sm != null) {\n+ sm.checkPermission(new SpecialPermission());\n+ }\n+ try {\n+ return AccessController.doPrivileged(operation);\n+ } catch (PrivilegedActionException e) {\n+ throw (IOException) e.getException();\n+ }\n+ }\n }", "filename": "plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/S3BlobContainer.java", "status": "modified" }, { "diff": "@@ -22,4 +22,16 @@ grant {\n // TODO: get these fixed in aws sdk\n permission java.lang.RuntimePermission \"accessDeclaredMembers\";\n permission java.lang.RuntimePermission \"getClassLoader\";\n+ // Needed because of problems in AmazonS3Client:\n+ // When no region is set on a AmazonS3Client instance, the\n+ // AWS SDK loads all known partitions from a JSON file and\n+ // uses a Jackson's ObjectMapper for that: this one, in\n+ // version 2.5.3 with the default binding options, tries\n+ // to suppress access checks of ctor/field/method and thus\n+ // requires this special permission. 
AWS must be fixed to\n+ // uses Jackson correctly and have the correct modifiers\n+ // on binded classes.\n+ // TODO: get these fixed in aws sdk\n+ // See https://github.com/aws/aws-sdk-java/issues/766\n+ permission java.lang.reflect.ReflectPermission \"suppressAccessChecks\";\n };", "filename": "plugins/repository-s3/src/main/plugin-metadata/plugin-security.policy", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: 2.3.2\n\n**JVM version**: 25.91-b14\n\n**OS version**: \n\n**Description of the problem including expected versus actual behavior**:\n\n**Steps to reproduce**:\n\n``` sh\ncurl -XPUT 'http://localhost:9200/_snapshot/test-snapshot' -d '{\n \"type\": \"s3\",\n \"settings\": {\n \"bucket\": \"test-snapshot\",\n \"access_key\": \"XXXXX\", \n \"secret_key\": \"XXXX\" }\n}'\n```\n\n**Provide logs (if relevant)**:\n\n```\n[2016-05-24 00:12:38,425][WARN ][repositories ] [54.210.62.226 (m3.xlarge) - i-4e4e7ac9] [awsmt-deviceprofile-es-snapshot] failed to verify repository\nBlobStoreException[failed to check if blob exists]; nested: AmazonClientException[Error while loading partitions file from com/amazonaws/partitions/endpoints.json]; nested: JsonMappingException[Can not access public com.amazonaws.partitions.model.Partitions(java.lang.String,java.util.List) (from class com.amazonaws.partitions.model.Partitions; failed to set access: access denied (\"java.lang.reflect.ReflectPermission\" \"suppressAccessChecks\")]; nested: IllegalArgumentException[Can not access public com.amazonaws.partitions.model.Partitions(java.lang.String,java.util.List) (from class com.amazonaws.partitions.model.Partitions; failed to set access: access denied (\"java.lang.reflect.ReflectPermission\" \"suppressAccessChecks\")];\n at org.elasticsearch.cloud.aws.blobstore.S3BlobContainer.blobExists(S3BlobContainer.java:65)\n at org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository.verify(BlobStoreIndexShardRepository.java:240)\n at org.elasticsearch.repositories.VerifyNodeRepositoryAction.doVerify(VerifyNodeRepositoryAction.java:121)\n at org.elasticsearch.repositories.VerifyNodeRepositoryAction.verify(VerifyNodeRepositoryAction.java:86)\n at org.elasticsearch.repositories.RepositoriesService.verifyRepository(RepositoriesService.java:214)\n at org.elasticsearch.repositories.RepositoriesService$VerifyingRegisterRepositoryListener.onResponse(RepositoriesService.java:436)\n at org.elasticsearch.repositories.RepositoriesService$VerifyingRegisterRepositoryListener.onResponse(RepositoriesService.java:421)\n at org.elasticsearch.cluster.AckedClusterStateUpdateTask.onAllNodesAcked(AckedClusterStateUpdateTask.java:63)\n at org.elasticsearch.cluster.service.InternalClusterService$SafeAckedClusterStateTaskListener.onAllNodesAcked(InternalClusterService.java:733)\n at org.elasticsearch.cluster.service.InternalClusterService$AckCountDownListener.onNodeAck(InternalClusterService.java:1013)\n at org.elasticsearch.cluster.service.InternalClusterService$DelegetingAckListener.onNodeAck(InternalClusterService.java:952)\n at org.elasticsearch.cluster.service.InternalClusterService.runTasksForExecutor(InternalClusterService.java:637)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:772)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:231)\nat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: com.amazonaws.AmazonClientException: Error while loading partitions file from com/amazonaws/partitions/endpoints.json\n at com.amazonaws.partitions.PartitionsLoader.loadPartitionFromStream(PartitionsLoader.java:99)\n at 
com.amazonaws.partitions.PartitionsLoader.build(PartitionsLoader.java:88)\n at com.amazonaws.regions.RegionMetadataFactory.create(RegionMetadataFactory.java:30)\n at com.amazonaws.regions.RegionUtils.initialize(RegionUtils.java:66)\n at com.amazonaws.regions.RegionUtils.getRegionMetadata(RegionUtils.java:54)\n at com.amazonaws.regions.RegionUtils.getRegion(RegionUtils.java:104)\n at com.amazonaws.services.s3.AmazonS3Client.resolveServiceEndpoint(AmazonS3Client.java:4195)\n at com.amazonaws.services.s3.AmazonS3Client.getObjectMetadata(AmazonS3Client.java:1006)\n at com.amazonaws.services.s3.AmazonS3Client.getObjectMetadata(AmazonS3Client.java:991)\n at org.elasticsearch.cloud.aws.blobstore.S3BlobContainer.blobExists(S3BlobContainer.java:60)\n ... 17 more\nCaused by: com.fasterxml.jackson.databind.JsonMappingException: Can not access public com.amazonaws.partitions.model.Partitions(java.lang.String,java.util.List) (from class com.amazonaws.partitions.model.Partitions; failed to set access: access denied (\"java.lang.reflect.ReflectPermission\" \"suppressAccessChecks\")\n at com.fasterxml.jackson.databind.deser.DeserializerCache._createAndCache2(DeserializerCache.java:269)\n at com.fasterxml.jackson.databind.deser.DeserializerCache._createAndCacheValueDeserializer(DeserializerCache.java:244)\n at com.fasterxml.jackson.databind.deser.DeserializerCache.findValueDeserializer(DeserializerCache.java:142)\n at com.fasterxml.jackson.databind.DeserializationContext.findRootValueDeserializer(DeserializationContext.java:461)\n at com.fasterxml.jackson.databind.ObjectMapper._findRootDeserializer(ObjectMapper.java:3833)\n at com.fasterxml.jackson.databind.ObjectMapper._readMapAndClose(ObjectMapper.java:3727)\n at com.fasterxml.jackson.databind.ObjectMapper.readValue(ObjectMapper.java:2794)\n at com.amazonaws.partitions.PartitionsLoader.loadPartitionFromStream(PartitionsLoader.java:96)\n ... 
26 more\nCaused by: java.lang.IllegalArgumentException: Can not access public com.amazonaws.partitions.model.Partitions(java.lang.String,java.util.List) (from class com.amazonaws.partitions.model.Partitions; failed to set access: access denied (\"java.lang.reflect.ReflectPermission\" \"suppressAccessChecks\")\n at com.fasterxml.jackson.databind.util.ClassUtil.checkAndFixAccess(ClassUtil.java:513)\n at com.fasterxml.jackson.databind.deser.impl.CreatorCollector._fixAccess(CreatorCollector.java:280)\n at com.fasterxml.jackson.databind.deser.impl.CreatorCollector.verifyNonDup(CreatorCollector.java:327)\n at com.fasterxml.jackson.databind.deser.impl.CreatorCollector.addPropertyCreator(CreatorCollector.java:184)\n at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory._addDeserializerConstructors(BasicDeserializerFactory.java:493)\n at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory._constructDefaultValueInstantiator(BasicDeserializerFactory.java:324)\n at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory.findValueInstantiator(BasicDeserializerFactory.java:254)\n at com.fasterxml.jackson.databind.deser.BeanDeserializerFactory.buildBeanDeserializer(BeanDeserializerFactory.java:222)\n at com.fasterxml.jackson.databind.deser.BeanDeserializerFactory.createBeanDeserializer(BeanDeserializerFactory.java:142)\n at com.fasterxml.jackson.databind.deser.DeserializerCache._createDeserializer2(DeserializerCache.java:403)\n at com.fasterxml.jackson.databind.deser.DeserializerCache._createDeserializer(DeserializerCache.java:352)\n at com.fasterxml.jackson.databind.deser.DeserializerCache._createAndCache2(DeserializerCache.java:264)\n ... 33 more\n```\n", "comments": [ { "body": "@dadoonet could you take a look please?\n", "created_at": "2016-05-24T09:36:50Z" }, { "body": "Is this blocking the release of v2.3.4?\n", "created_at": "2016-06-22T17:36:38Z" }, { "body": "@yamap77 Did you set any specific metadata for the S3 bucket: `test-snapshot`?\nCould you also try to explicitly set a region like `\"region\": \"us-west\"`?\n", "created_at": "2016-06-23T20:45:36Z" }, { "body": "Hi,\n\nI had fixed this issue. It turned out that I have to disable the security manager. After I disable it, I can sent data to s3 now.\n", "created_at": "2016-06-23T21:03:52Z" }, { "body": "I'm reopening it. You should never disable the security manager. We need to understand what is happening.\n\nI'll try to reproduce but if you have any details which would help to understand I'd appreciate a lot!\n", "created_at": "2016-06-23T21:24:50Z" }, { "body": "Hi,\nCurious to know if this issue is resolved in 2.3.5? \n", "created_at": "2016-11-03T07:16:22Z" }, { "body": "@mahesh-maney I don't think so. It's marked as 2.4.0 and 5.0.0 and according to the commits it has not been back ported in 2.3 branch.\n", "created_at": "2016-11-03T07:31:24Z" }, { "body": "FWIW, I am hacking the 2.4 branch with the ultimate goal of creating a S3 plugin that uses KMS encryption. \r\n\r\nI can add a single line to` org.elasticsearch.cloud.aws.InternalAwsS3Service`\r\n\r\n` Region region1 = Region.getRegion(Regions.US_EAST_1);\r\n`\r\nand reproduce what I think is this issue. 
\r\n\r\n```\r\nCaused by: java.lang.IllegalArgumentException: Can not access public com.amazonaws.partitions.model.Partitions(java.lang.String,java.util.List) (from class com.amazonaws.partitions.model.Partitions; failed to set access: access denied (\"java.lang.reflect.ReflectPermission\" \"suppressAccessChecks\")\r\n\tat com.fasterxml.jackson.databind.util.ClassUtil.checkAndFixAccess(ClassUtil.java:505)\r\n\tat com.fasterxml.jackson.databind.deser.impl.CreatorCollector._fixAccess(CreatorCollector.java:271)\r\n\r\n```\r\n\r\nI did verify that the patch mentioned here is in code I am working with. \r\n\r\nIf anyone has some ideas for a work around I'd appreciate it. (Also, apologies if this is wrong place for this kind of info but I thought it was relevant.) \r\n\r\n", "created_at": "2017-04-28T11:19:41Z" } ], "number": 18539, "title": "Jackson databind Exception when creating repository/sending snapshot to s3" }
{ "body": "Note: PR against 2.4 branch, master will follow.\n\nS3 repository needs a special permission to work because when no region is explicitly set the AWS SDK will load a JSON file that contains all Amazon's endpoints and will map the content of this file to plain old Java objects. To do that, it uses Jackson's databinding and reflection that require the `java.lang.reflect.ReflectPermission \"suppressAccessChecks\"` permission.\n\nThis issue only occurs if no region is set in the repository setting and in the elasticsearch.yml file.\n\ncloses #18539\n", "number": 19121, "review_comments": [], "title": "Add missing permission for S3 repository" }
{ "commits": [ { "message": " Add missing permission for S3 repository\n\n S3 repository needs a special permission to work because when no region is explictly set the AWS SDK will load a JSON file that contain all Amazon's endpoints and will map the content of this file to plain old Java objects. To do that, it uses Jackson's databinding and reflection that require a special permission.\n\n closes #18539" }, { "message": "Update after Robert's comment" } ], "files": [ { "diff": "@@ -20,7 +20,13 @@\n package org.elasticsearch.cloud.aws.blobstore;\n \n import com.amazonaws.AmazonClientException;\n-import com.amazonaws.services.s3.model.*;\n+import com.amazonaws.services.s3.model.AmazonS3Exception;\n+import com.amazonaws.services.s3.model.CopyObjectRequest;\n+import com.amazonaws.services.s3.model.ObjectListing;\n+import com.amazonaws.services.s3.model.ObjectMetadata;\n+import com.amazonaws.services.s3.model.S3Object;\n+import com.amazonaws.services.s3.model.S3ObjectSummary;\n+import org.elasticsearch.SpecialPermission;\n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.blobstore.BlobMetaData;\n import org.elasticsearch.common.blobstore.BlobPath;\n@@ -33,6 +39,9 @@\n import java.io.IOException;\n import java.io.InputStream;\n import java.io.OutputStream;\n+import java.security.AccessController;\n+import java.security.PrivilegedActionException;\n+import java.security.PrivilegedExceptionAction;\n import java.util.Map;\n \n /**\n@@ -55,10 +64,19 @@ public S3BlobContainer(BlobPath path, S3BlobStore blobStore) {\n }\n \n @Override\n- public boolean blobExists(String blobName) {\n+ public boolean blobExists(final String blobName) {\n try {\n- blobStore.client().getObjectMetadata(blobStore.bucket(), buildKey(blobName));\n- return true;\n+ return doPrivileged(new PrivilegedExceptionAction<Boolean>() {\n+ @Override\n+ public Boolean run() throws Exception {\n+ try {\n+ blobStore.client().getObjectMetadata(blobStore.bucket(), buildKey(blobName));\n+ return true;\n+ } catch (AmazonS3Exception e) {\n+ return false;\n+ }\n+ }\n+ });\n } catch (AmazonS3Exception e) {\n return false;\n } catch (Throwable e) {\n@@ -159,4 +177,18 @@ protected String buildKey(String blobName) {\n return keyPath + blobName;\n }\n \n+ /**\n+ * Executes a {@link PrivilegedExceptionAction} with privileges enabled.\n+ */\n+ <T> T doPrivileged(PrivilegedExceptionAction<T> operation) throws IOException {\n+ SecurityManager sm = System.getSecurityManager();\n+ if (sm != null) {\n+ sm.checkPermission(new SpecialPermission());\n+ }\n+ try {\n+ return AccessController.doPrivileged(operation);\n+ } catch (PrivilegedActionException e) {\n+ throw (IOException) e.getException();\n+ }\n+ }\n }", "filename": "plugins/cloud-aws/src/main/java/org/elasticsearch/cloud/aws/blobstore/S3BlobContainer.java", "status": "modified" }, { "diff": "@@ -23,4 +23,14 @@ grant {\n // NOTE: no tests fail without this, but we know the problem\n // exists in AWS sdk, and tests here are not thorough\n permission java.lang.RuntimePermission \"getClassLoader\";\n+ // Needed because of problems in AmazonS3Client:\n+ // When no region is set on a AmazonS3Client instance, the\n+ // AWS SDK loads all known partitions from a JSON file and\n+ // uses a Jackson's ObjectMapper for that: this one, in\n+ // version 2.5.3 with the default binding options, tries\n+ // to suppress access checks of ctor/field/method and thus\n+ // requires this special permission. 
AWS must be fixed to\n+ // uses Jackson correctly and have the correct modifiers\n+ // on binded classes.\n+ permission java.lang.reflect.ReflectPermission \"suppressAccessChecks\";\n };", "filename": "plugins/cloud-aws/src/main/plugin-metadata/plugin-security.policy", "status": "modified" } ] }
{ "body": "This `discovery-azure` plugin is broken since 2.x. This commit fixes the plugin so that it can work with the security manager and uses the right classloader when loading its Azure services.\n\nIt's just a fix because testing these things on Azure are very time consuming, but we **really really** need to automatically test this correctly. It's a shame it's been broken for so long.\n\nNote: 2.x fix is similar and will follow soon.\n\nRelated to #18637, #15630\n", "comments": [ { "body": "@dadoonet Thanks for the review. I updated the code, can you please test it on your side?\n", "created_at": "2016-06-27T07:20:59Z" }, { "body": "The change looks good to me.\n", "created_at": "2016-06-27T07:31:40Z" }, { "body": "@tlrx I ran a test today:\n- Started an azure instance (Ubuntu)\n- Installed OpenJDK 8\n- Downloaded Elasticsearch from Maven: https://oss.sonatype.org/content/repositories/snapshots/org/elasticsearch/distribution/zip/elasticsearch/5.0.0-alpha4-SNAPSHOT/elasticsearch-5.0.0-alpha4-20160627.132643-136.zip\n- Uploaded discovery-azure plugin I built locally from your branch\n- Changed `elasticsearch.yml`:\n\n``` yml\ncloud:\n azure:\n management:\n keystore:\n path: /home/elasticsearch/elasticsearch-5.0.0-alpha4-SNAPSHOT/config/azure/azurekeystore.pkcs12\n password: MYPASSWORD\n subscription.id: MYID\n cloud.service.name: MYPROJECT\n\ndiscovery:\n type: azure\n```\n- Changed `logging.yml`:\n\n``` yml\n discovery.azure: TRACE\n cloud.azure: TRACE\n```\n- Started elasticsearch and looked at logs:\n\n```\n[2016-06-27 15:17:43,704][DEBUG][cloud.azure ] [Cyclops] starting azure services\n[2016-06-27 15:17:43,704][TRACE][cloud.azure ] [Cyclops] All required properties for Azure discovery are set!\n[2016-06-27 15:17:43,704][DEBUG][cloud.azure ] [Cyclops] starting azure discovery service\n[2016-06-27 15:17:50,198][TRACE][cloud.azure.management ] [Cyclops] creating new Azure client for [MYID], [MYPROJECT]\n[2016-06-27 15:17:51,560][DEBUG][cloud.azure.management ] [Cyclops] creating new Azure client for [MYID], [MYPROJECT]\n[2016-06-27 15:17:57,336][INFO ][node ] [Cyclops] initialized\n[2016-06-27 15:17:57,336][INFO ][node ] [Cyclops] starting ...\n[2016-06-27 15:17:58,595][INFO ][transport ] [Cyclops] publish_address {127.0.0.1:9300}, bound_addresses {[::1]:9300}, {127.0.0.1:9300}\n[2016-06-27 15:17:58,897][DEBUG][discovery.azure ] [Cyclops] start building nodes list using Azure API\n[2016-06-27 15:18:07,019][TRACE][discovery.azure ] [Cyclops] ip of current node: [/127.0.0.1]\n[2016-06-27 15:18:07,038][TRACE][discovery.azure ] [Cyclops] adding 10.1.0.4, transport_address 10.1.0.4:9300\n[2016-06-27 15:18:07,048][DEBUG][discovery.azure ] [Cyclops] 1 node(s) added\n```\n\nTo me everything is OK. +1 to merge your PR.\n\nThanks!\n", "created_at": "2016-06-27T15:24:29Z" }, { "body": "Unfortunately this bug has not been caught by the `AzureDiscoveryClusterFormationTests` test which has been made to test the discovery-azure plugin with the security manager.\n\nI suspect that a) the test is correct and place the keystore in an accessible place and b) when executed using Gradle and the test framework, the testing classpath contains all the necessary JARs files for the `ServiceLoader` to load the Azure services implementations. I think that using a REST based integration test would have caught the issue.\n", "created_at": "2016-06-28T08:11:28Z" } ], "number": 19062, "title": "Make discovery-azure plugin work again" }
{ "body": "Backport of #19062 on 2.4 branch\nCloses #18637\n", "number": 19116, "review_comments": [], "title": " Make discovery-azure work again on 2.4 branch" }
{ "commits": [ { "message": " Make discovery-azure work again\n\nThe discovery-plugin has been broken since 2.x because the code was not compliant with the security manager and because plugins have been refactored.\n\ncloses #18637, #15630" } ], "files": [ { "diff": "@@ -54,6 +54,7 @@ discovery:\n type: azure\n ----\n \n+WARNING: The keystore file must be placed in a directory accessible by elasticsearch like the `config` directory.\n \n [IMPORTANT]\n .Binding the network host", "filename": "docs/plugins/cloud-azure.asciidoc", "status": "modified" }, { "diff": "@@ -20,13 +20,14 @@\n package org.elasticsearch.cloud.azure.management;\n \n import com.microsoft.windowsazure.Configuration;\n+import com.microsoft.windowsazure.core.Builder;\n+import com.microsoft.windowsazure.core.DefaultBuilder;\n import com.microsoft.windowsazure.core.utils.KeyStoreType;\n import com.microsoft.windowsazure.management.compute.ComputeManagementClient;\n import com.microsoft.windowsazure.management.compute.ComputeManagementService;\n import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse;\n import com.microsoft.windowsazure.management.configuration.ManagementConfiguration;\n import org.elasticsearch.ElasticsearchException;\n-import org.elasticsearch.cloud.azure.AzureServiceDisableException;\n import org.elasticsearch.cloud.azure.AzureServiceRemoteException;\n import org.elasticsearch.common.component.AbstractLifecycleComponent;\n import org.elasticsearch.common.inject.Inject;\n@@ -35,8 +36,12 @@\n import java.io.IOException;\n import java.net.URI;\n import java.net.URISyntaxException;\n+import java.util.ServiceLoader;\n \n-import static org.elasticsearch.cloud.azure.management.AzureComputeService.Management.*;\n+import static org.elasticsearch.cloud.azure.management.AzureComputeService.Management.KEYSTORE_PASSWORD;\n+import static org.elasticsearch.cloud.azure.management.AzureComputeService.Management.KEYSTORE_PATH;\n+import static org.elasticsearch.cloud.azure.management.AzureComputeService.Management.KEYSTORE_TYPE;\n+import static org.elasticsearch.cloud.azure.management.AzureComputeService.Management.SUBSCRIPTION_ID;\n \n /**\n *\n@@ -48,7 +53,7 @@ static final class Azure {\n private static final String ENDPOINT = \"https://management.core.windows.net/\";\n }\n \n- private final ComputeManagementClient computeManagementClient;\n+ private final ComputeManagementClient client;\n private final String serviceName;\n \n @Inject\n@@ -69,29 +74,36 @@ public AzureComputeServiceImpl(Settings settings) {\n }\n KeyStoreType keystoreType = tmpKeyStoreType;\n \n- // Check that we have all needed properties\n- Configuration configuration;\n try {\n- configuration = ManagementConfiguration.configure(new URI(Azure.ENDPOINT),\n+ // Azure SDK configuration uses DefaultBuilder which uses java.util.ServiceLoader to load the\n+ // various Azure services. By default, this will use the current thread's context classloader\n+ // to load services. 
Since the current thread refers to the main application classloader it\n+ // won't find any Azure service implementation.\n+\n+ // Here we basically create a new DefaultBuilder that uses the current class classloader to load services.\n+ DefaultBuilder builder = new DefaultBuilder();\n+ for (Builder.Exports exports : ServiceLoader.load(Builder.Exports.class, getClass().getClassLoader())) {\n+ exports.register(builder);\n+ }\n+\n+ // And create a new blank configuration based on the previous DefaultBuilder\n+ Configuration configuration = new Configuration(builder);\n+ configuration.setProperty(Configuration.PROPERTY_LOG_HTTP_REQUESTS, logger.isTraceEnabled());\n+\n+ Configuration managementConfig = ManagementConfiguration.configure(null, configuration, new URI(Azure.ENDPOINT),\n subscriptionId, keystorePath, keystorePassword, keystoreType);\n+\n+ logger.debug(\"creating new Azure client for [{}], [{}]\", subscriptionId, serviceName);\n+ client = ComputeManagementService.create(managementConfig);\n } catch (IOException|URISyntaxException e) {\n- logger.error(\"can not start azure client: {}\", e.getMessage());\n- computeManagementClient = null;\n- return;\n+ throw new ElasticsearchException(\"Unable to configure Azure compute service\", e);\n }\n- logger.trace(\"creating new Azure client for [{}], [{}]\", subscriptionId, serviceName);\n- computeManagementClient = ComputeManagementService.create(configuration);\n }\n \n @Override\n public HostedServiceGetDetailedResponse getServiceDetails() {\n- if (computeManagementClient == null) {\n- // Azure plugin is disabled\n- throw new AzureServiceDisableException(\"azure plugin is disabled.\");\n- }\n-\n try {\n- return computeManagementClient.getHostedServicesOperations().getDetailed(serviceName);\n+ return client.getHostedServicesOperations().getDetailed(serviceName);\n } catch (Exception e) {\n throw new AzureServiceRemoteException(\"can not get list of azure nodes\", e);\n }\n@@ -107,9 +119,9 @@ protected void doStop() throws ElasticsearchException {\n \n @Override\n protected void doClose() throws ElasticsearchException {\n- if (computeManagementClient != null) {\n+ if (client != null) {\n try {\n- computeManagementClient.close();\n+ client.close();\n } catch (IOException e) {\n logger.error(\"error while closing Azure client\", e);\n }", "filename": "plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java", "status": "modified" } ] }
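The fix above boils down to one Java idiom: pass an explicit class loader to `java.util.ServiceLoader` instead of relying on the thread context class loader. The sketch below illustrates only that idiom, using a standard JDK SPI rather than the Azure SDK's `Builder.Exports`; it is a minimal, self-contained illustration, not code from the plugin.

```java
import java.nio.file.spi.FileSystemProvider;
import java.util.ServiceLoader;

public class ServiceLoaderClassLoaderDemo {

    public static void main(String[] args) {
        // ServiceLoader.load(Class) is documented to use the current thread's context
        // class loader. Inside a plugin whose jars live under a dedicated class loader,
        // the context class loader is typically the application class loader, which
        // cannot see the plugin's META-INF/services entries -- the situation the Azure
        // discovery fix above works around.
        ServiceLoader<FileSystemProvider> viaContextLoader =
                ServiceLoader.load(FileSystemProvider.class);

        // Passing a class loader explicitly makes the lookup independent of which thread
        // calls it. Using the class loader of a class that sits next to the service
        // implementations guarantees the provider-configuration files are visible.
        ClassLoader sameLoaderAsThisClass = ServiceLoaderClassLoaderDemo.class.getClassLoader();
        ServiceLoader<FileSystemProvider> viaExplicitLoader =
                ServiceLoader.load(FileSystemProvider.class, sameLoaderAsThisClass);

        // In a plain JVM both loaders resolve the same providers; the difference only
        // matters once the context and defining class loaders diverge, as in the plugin case.
        viaContextLoader.forEach(p -> System.out.println("context loader sees: " + p.getScheme()));
        viaExplicitLoader.forEach(p -> System.out.println("explicit loader sees: " + p.getScheme()));
    }
}
```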
{ "body": "**Elasticsearch version**:\n2.3\n\n**JVM version**:\n1.8.0_91-b14\n\n**OS version**:\nUbuntu 16.04\n\n**Description of the problem including expected versus actual behavior**:\nI have the following config:\n\n```\ndiscovery:\n type: azure\n\ncloud:\n azure:\n management:\n subscription.id: [...]\n cloud.service.name: [...]\n keystore:\n path: /home/elasticsearch/azurekeystore.pkcs12\n password: [...]\n type: pkcs12\n```\n\nNow when I try to start elasticsearch it fails and I find the following error in the logs:\n\n```\n[2016-05-30 12:59:49,219][ERROR][bootstrap ] Guice Exception: java.security.AccessControlException: access denied (\"java.lang.RuntimePermission\" \"getClassLoader\")\n at java.security.AccessControlContext.checkPermission(AccessControlContext.java:472)\n at java.security.AccessController.checkPermission(AccessController.java:884)\n at java.lang.SecurityManager.checkPermission(SecurityManager.java:549)\n at java.lang.ClassLoader.checkClassLoaderPermission(ClassLoader.java:1528)\n at java.lang.Thread.getContextClassLoader(Thread.java:1436)\n at com.microsoft.windowsazure.Configuration.load(Configuration.java:104)\n at com.microsoft.windowsazure.Configuration.getInstance(Configuration.java:90)\n at com.microsoft.windowsazure.management.configuration.ManagementConfiguration.configure(ManagementConfiguration.java:134)\n at org.elasticsearch.cloud.azure.management.AzureComputeServiceImpl.<init>(AzureComputeServiceImpl.java:75)\n at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)\n at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)\n at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)\n at java.lang.reflect.Constructor.newInstance(Constructor.java:423)\n at <<<guice>>>\n at org.elasticsearch.node.Node.<init>(Node.java:213)\n at org.elasticsearch.node.Node.<init>(Node.java:140)\n at org.elasticsearch.node.NodeBuilder.build(NodeBuilder.java:143)\n at org.elasticsearch.bootstrap.Bootstrap.setup(Bootstrap.java:178)\n at org.elasticsearch.bootstrap.Bootstrap.init(Bootstrap.java:270)\n at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:35)\n```\n\nany idea what I'm doing wrong here?\nIf I leave out the pasted piece of the config, elastic fires up just fine.\n", "comments": [ { "body": "Just want to make sure: are you using elasticsearch 2.3.2 or another version?\n", "created_at": "2016-05-30T13:33:09Z" }, { "body": "@dadoonet it's v2.3.3\n", "created_at": "2016-05-30T13:38:35Z" }, { "body": "So I'm able to reproduce it. I'll create a PR for this and hopefully I'll give a workaround for it.\n", "created_at": "2016-05-30T14:31:56Z" }, { "body": "Thanks! Curious to see the PR to learn what causes it\n\n(and good to know it's not me doing something stupid :) )\n", "created_at": "2016-05-30T18:28:39Z" } ], "number": 18637, "title": "Cloud azure plugin crashes on start" }
{ "body": "Backport of #19062 on 2.4 branch\nCloses #18637\n", "number": 19116, "review_comments": [], "title": " Make discovery-azure work again on 2.4 branch" }
{ "commits": [ { "message": " Make discovery-azure work again\n\nThe discovery-plugin has been broken since 2.x because the code was not compliant with the security manager and because plugins have been refactored.\n\ncloses #18637, #15630" } ], "files": [ { "diff": "@@ -54,6 +54,7 @@ discovery:\n type: azure\n ----\n \n+WARNING: The keystore file must be placed in a directory accessible by elasticsearch like the `config` directory.\n \n [IMPORTANT]\n .Binding the network host", "filename": "docs/plugins/cloud-azure.asciidoc", "status": "modified" }, { "diff": "@@ -20,13 +20,14 @@\n package org.elasticsearch.cloud.azure.management;\n \n import com.microsoft.windowsazure.Configuration;\n+import com.microsoft.windowsazure.core.Builder;\n+import com.microsoft.windowsazure.core.DefaultBuilder;\n import com.microsoft.windowsazure.core.utils.KeyStoreType;\n import com.microsoft.windowsazure.management.compute.ComputeManagementClient;\n import com.microsoft.windowsazure.management.compute.ComputeManagementService;\n import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse;\n import com.microsoft.windowsazure.management.configuration.ManagementConfiguration;\n import org.elasticsearch.ElasticsearchException;\n-import org.elasticsearch.cloud.azure.AzureServiceDisableException;\n import org.elasticsearch.cloud.azure.AzureServiceRemoteException;\n import org.elasticsearch.common.component.AbstractLifecycleComponent;\n import org.elasticsearch.common.inject.Inject;\n@@ -35,8 +36,12 @@\n import java.io.IOException;\n import java.net.URI;\n import java.net.URISyntaxException;\n+import java.util.ServiceLoader;\n \n-import static org.elasticsearch.cloud.azure.management.AzureComputeService.Management.*;\n+import static org.elasticsearch.cloud.azure.management.AzureComputeService.Management.KEYSTORE_PASSWORD;\n+import static org.elasticsearch.cloud.azure.management.AzureComputeService.Management.KEYSTORE_PATH;\n+import static org.elasticsearch.cloud.azure.management.AzureComputeService.Management.KEYSTORE_TYPE;\n+import static org.elasticsearch.cloud.azure.management.AzureComputeService.Management.SUBSCRIPTION_ID;\n \n /**\n *\n@@ -48,7 +53,7 @@ static final class Azure {\n private static final String ENDPOINT = \"https://management.core.windows.net/\";\n }\n \n- private final ComputeManagementClient computeManagementClient;\n+ private final ComputeManagementClient client;\n private final String serviceName;\n \n @Inject\n@@ -69,29 +74,36 @@ public AzureComputeServiceImpl(Settings settings) {\n }\n KeyStoreType keystoreType = tmpKeyStoreType;\n \n- // Check that we have all needed properties\n- Configuration configuration;\n try {\n- configuration = ManagementConfiguration.configure(new URI(Azure.ENDPOINT),\n+ // Azure SDK configuration uses DefaultBuilder which uses java.util.ServiceLoader to load the\n+ // various Azure services. By default, this will use the current thread's context classloader\n+ // to load services. 
Since the current thread refers to the main application classloader it\n+ // won't find any Azure service implementation.\n+\n+ // Here we basically create a new DefaultBuilder that uses the current class classloader to load services.\n+ DefaultBuilder builder = new DefaultBuilder();\n+ for (Builder.Exports exports : ServiceLoader.load(Builder.Exports.class, getClass().getClassLoader())) {\n+ exports.register(builder);\n+ }\n+\n+ // And create a new blank configuration based on the previous DefaultBuilder\n+ Configuration configuration = new Configuration(builder);\n+ configuration.setProperty(Configuration.PROPERTY_LOG_HTTP_REQUESTS, logger.isTraceEnabled());\n+\n+ Configuration managementConfig = ManagementConfiguration.configure(null, configuration, new URI(Azure.ENDPOINT),\n subscriptionId, keystorePath, keystorePassword, keystoreType);\n+\n+ logger.debug(\"creating new Azure client for [{}], [{}]\", subscriptionId, serviceName);\n+ client = ComputeManagementService.create(managementConfig);\n } catch (IOException|URISyntaxException e) {\n- logger.error(\"can not start azure client: {}\", e.getMessage());\n- computeManagementClient = null;\n- return;\n+ throw new ElasticsearchException(\"Unable to configure Azure compute service\", e);\n }\n- logger.trace(\"creating new Azure client for [{}], [{}]\", subscriptionId, serviceName);\n- computeManagementClient = ComputeManagementService.create(configuration);\n }\n \n @Override\n public HostedServiceGetDetailedResponse getServiceDetails() {\n- if (computeManagementClient == null) {\n- // Azure plugin is disabled\n- throw new AzureServiceDisableException(\"azure plugin is disabled.\");\n- }\n-\n try {\n- return computeManagementClient.getHostedServicesOperations().getDetailed(serviceName);\n+ return client.getHostedServicesOperations().getDetailed(serviceName);\n } catch (Exception e) {\n throw new AzureServiceRemoteException(\"can not get list of azure nodes\", e);\n }\n@@ -107,9 +119,9 @@ protected void doStop() throws ElasticsearchException {\n \n @Override\n protected void doClose() throws ElasticsearchException {\n- if (computeManagementClient != null) {\n+ if (client != null) {\n try {\n- computeManagementClient.close();\n+ client.close();\n } catch (IOException e) {\n logger.error(\"error while closing Azure client\", e);\n }", "filename": "plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java", "status": "modified" } ] }
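The stack trace in the related issue fails inside `Thread.getContextClassLoader()`, which under a security manager requires `RuntimePermission("getClassLoader")` when the caller's class loader is not an ancestor of the context class loader. An alternative to the merged fix would have been to keep that lookup but run it with the plugin's own privileges. The sketch below shows the general Java mechanism only; the policy grant in the comment is an assumption for illustration, not something the plugin actually ships.

```java
import java.security.AccessController;
import java.security.PrivilegedAction;

public class PrivilegedClassLoaderLookup {

    /**
     * Looks up the context class loader inside a privileged block. doPrivileged stops the
     * permission check at this class's protection domain, so the call succeeds as long as
     * this code itself has been granted the permission, e.g. via a policy entry such as
     *
     *   grant {
     *     permission java.lang.RuntimePermission "getClassLoader";
     *   };
     *
     * It does not help when a third-party jar (here, the Azure SDK) performs the lookup on
     * its own, which is why the merged fix avoids the context class loader entirely.
     */
    static ClassLoader lookupContextClassLoader() {
        return AccessController.doPrivileged(
                (PrivilegedAction<ClassLoader>) () -> Thread.currentThread().getContextClassLoader());
    }

    public static void main(String[] args) {
        System.out.println("context class loader: " + lookupContextClassLoader());
    }
}
```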
{ "body": "**Elasticsearch version**: 2.3.2\n\n**JVM version**: 1.7.0_67\n\n**OS version**: OSX 10.11.4\n\n**Description of the problem including expected versus actual behavior**:\n\nStart with two indices and an alias for both, the second with a new field introduced:\n\n```\ncurl -XPUT 'http://localhost:9200/test_index_1/dates/1?pretty' -d '{\"when_received\": \"2016-04-25T13:21:24.000Z\"}'\ncurl -XPUT 'http://localhost:9200/test_index_1/dates/2?pretty' -d '{\"when_received\": \"2016-05-28T14:21:24.000Z\"}'\ncurl -XPUT 'http://localhost:9200/test_index_1/dates/3?pretty' -d '{\"when_received\": \"2016-06-28T17:21:24.000Z\"}'\ncurl -XPUT 'http://localhost:9200/test_index_1/dates/4?pretty' -d '{\"when_received\": \"2016-06-29T17:21:24.000Z\"}'\n\ncurl -XPUT 'http://localhost:9200/test_index_2/dates/1?pretty' -d '{\"when_recorded\": \"2016-04-25T13:21:24.000Z\", \"when_received\": \"2015-04-25T13:21:24.000Z\"}'\ncurl -XPUT 'http://localhost:9200/test_index_2/dates/2?pretty' -d '{\"when_recorded\": \"2016-05-28T14:21:24.000Z\", \"when_received\": \"2015-05-28T14:21:24.000Z\"}'\ncurl -XPUT 'http://localhost:9200/test_index_2/dates/3?pretty' -d '{\"when_recorded\": \"2016-06-28T17:21:24.000Z\", \"when_received\": \"2015-06-28T17:21:24.000Z\"}'\ncurl -XPUT 'http://localhost:9200/test_index_2/dates/4?pretty' -d '{\"when_recorded\": \"2016-06-29T17:21:24.000Z\", \"when_received\": \"2015-06-29T17:21:24.000Z\"}'\n\ncurl -XPOST 'http://localhost:9200/test_index_1/_refresh'\ncurl -XPOST 'http://localhost:9200/test_index_2/_refresh'\n\ncurl -XPOST 'http://localhost:9200/_aliases' -d '\n{\n \"actions\" : [\n { \"add\" : { \"index\" : \"test_index_1\", \"alias\" : \"all_indices\" } },\n { \"add\" : { \"index\" : \"test_index_2\", \"alias\" : \"all_indices\" } }\n ]\n}'\n```\n\nI want to do a `date_histogram` aggregation over the alias with `extended_bounds`. 
The results for each index individually are what I would expect: \n\n```\ncurl -XGET 'http://localhost:9200/test_index_1/_search?pretty' -d '{\n \"size\": 0,\n \"aggs\": \n {\"monthly_date_histogram\": \n {\"date_histogram\": {\"field\": \"when_recorded\", \n \"interval\": \"month\",\n \"min_doc_count\": 0,\n \"extended_bounds\": {\"max\": \"now\", \"min\": \"now-5M\"}}}}\n}\n'\n\n{\n \"took\" : 1,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 5,\n \"successful\" : 5,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : 4,\n \"max_score\" : 0.0,\n \"hits\" : [ ]\n },\n \"aggregations\" : {\n \"monthly_date_histogram\" : {\n \"buckets\" : [ ]\n }\n }\n}\n```\n\n```\ncurl -XGET 'http://localhost:9200/test_index_2/_search?pretty' -d '{\n \"size\": 0,\n \"aggs\": \n {\"monthly_date_histogram\": \n {\"date_histogram\": {\"field\": \"when_recorded\", \n \"interval\": \"month\",\n \"min_doc_count\": 0,\n \"extended_bounds\": {\"max\": \"now\", \"min\": \"now-5M\"}}}}\n}\n'\n\n{\n \"took\" : 2,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 5,\n \"successful\" : 5,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : 4,\n \"max_score\" : 0.0,\n \"hits\" : [ ]\n },\n \"aggregations\" : {\n \"monthly_date_histogram\" : {\n \"buckets\" : [ {\n \"key_as_string\" : \"2016-01-01T00:00:00.000Z\",\n \"key\" : 1451606400000,\n \"doc_count\" : 0\n }, {\n \"key_as_string\" : \"2016-02-01T00:00:00.000Z\",\n \"key\" : 1454284800000,\n \"doc_count\" : 0\n }, {\n \"key_as_string\" : \"2016-03-01T00:00:00.000Z\",\n \"key\" : 1456790400000,\n \"doc_count\" : 0\n }, {\n \"key_as_string\" : \"2016-04-01T00:00:00.000Z\",\n \"key\" : 1459468800000,\n \"doc_count\" : 1\n }, {\n \"key_as_string\" : \"2016-05-01T00:00:00.000Z\",\n \"key\" : 1462060800000,\n \"doc_count\" : 1\n }, {\n \"key_as_string\" : \"2016-06-01T00:00:00.000Z\",\n \"key\" : 1464739200000,\n \"doc_count\" : 2\n } ]\n }\n }\n}\n```\n\nHowever, when using the alias, the `extended_bounds` fail: \n\n```\ncurl -XGET 'http://localhost:9200/all_indices/_search?pretty' -d '{\n \"size\": 0,\n \"aggs\": \n {\"monthly_date_histogram\": \n {\"date_histogram\": {\"field\": \"when_recorded\", \n \"interval\": \"month\",\n \"min_doc_count\": 0,\n \"extended_bounds\": {\"max\": \"now\", \"min\": \"now-5M\"}}}}\n}\n'\n\n{\n \"took\" : 3,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 10,\n \"successful\" : 10,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : 8,\n \"max_score\" : 0.0,\n \"hits\" : [ ]\n },\n \"aggregations\" : {\n \"monthly_date_histogram\" : {\n \"buckets\" : [ {\n \"key_as_string\" : \"2016-04-01T00:00:00.000Z\",\n \"key\" : 1459468800000,\n \"doc_count\" : 1\n }, {\n \"key_as_string\" : \"2016-05-01T00:00:00.000Z\",\n \"key\" : 1462060800000,\n \"doc_count\" : 1\n }, {\n \"key_as_string\" : \"2016-06-01T00:00:00.000Z\",\n \"key\" : 1464739200000,\n \"doc_count\" : 2\n } ]\n }\n }\n}\n```\n\nHere's the tricky part: this behavior depends on the actual index name. 
Same steps, but a different name for the second index: \n\n```\ncurl -XPUT 'http://localhost:9200/test_index_1/dates/1?pretty' -d '{\"when_received\": \"2016-04-25T13:21:24.000Z\"}'\ncurl -XPUT 'http://localhost:9200/test_index_1/dates/2?pretty' -d '{\"when_received\": \"2016-05-28T14:21:24.000Z\"}'\ncurl -XPUT 'http://localhost:9200/test_index_1/dates/3?pretty' -d '{\"when_received\": \"2016-06-28T17:21:24.000Z\"}'\ncurl -XPUT 'http://localhost:9200/test_index_1/dates/4?pretty' -d '{\"when_received\": \"2016-06-29T17:21:24.000Z\"}'\n\ncurl -XPUT 'http://localhost:9200/foobar/dates/1?pretty' -d '{\"when_recorded\": \"2016-04-25T13:21:24.000Z\", \"when_received\": \"2015-04-25T13:21:24.000Z\"}'\ncurl -XPUT 'http://localhost:9200/foobar/dates/2?pretty' -d '{\"when_recorded\": \"2016-05-28T14:21:24.000Z\", \"when_received\": \"2015-05-28T14:21:24.000Z\"}'\ncurl -XPUT 'http://localhost:9200/foobar/dates/3?pretty' -d '{\"when_recorded\": \"2016-06-28T17:21:24.000Z\", \"when_received\": \"2015-06-28T17:21:24.000Z\"}'\ncurl -XPUT 'http://localhost:9200/foobar/dates/4?pretty' -d '{\"when_recorded\": \"2016-06-29T17:21:24.000Z\", \"when_received\": \"2015-06-29T17:21:24.000Z\"}'\n\ncurl -XPOST 'http://localhost:9200/test_index_1/_refresh'\ncurl -XPOST 'http://localhost:9200/foobar/_refresh'\n\ncurl -XPOST 'http://localhost:9200/_aliases' -d '\n{\n \"actions\" : [\n { \"add\" : { \"index\" : \"test_index_1\", \"alias\" : \"all_indices\" } },\n { \"add\" : { \"index\" : \"foobar\", \"alias\" : \"all_indices\" } }\n ]\n}'\n```\n\nThe results for each index: \n\n```\ncurl -XGET 'http://localhost:9200/test_index_1/_search?pretty' -d '{\n \"size\": 0,\n \"aggs\": \n {\"monthly_date_histogram\": \n {\"date_histogram\": {\"field\": \"when_recorded\", \n \"interval\": \"month\",\n \"min_doc_count\": 0,\n \"extended_bounds\": {\"max\": \"now\", \"min\": \"now-5M\"}}}}\n}\n'\n\n{\n \"took\" : 1,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 5,\n \"successful\" : 5,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : 4,\n \"max_score\" : 0.0,\n \"hits\" : [ ]\n },\n \"aggregations\" : {\n \"monthly_date_histogram\" : {\n \"buckets\" : [ ]\n }\n }\n}\n```\n\n```\ncurl -XGET 'http://localhost:9200/foobar/_search?pretty' -d '{\n \"size\": 0,\n \"aggs\": \n {\"monthly_date_histogram\": \n {\"date_histogram\": {\"field\": \"when_recorded\", \n \"interval\": \"month\",\n \"min_doc_count\": 0,\n \"extended_bounds\": {\"max\": \"now\", \"min\": \"now-5M\"}}}}\n}\n'\n\n{\n \"took\" : 2,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 5,\n \"successful\" : 5,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : 4,\n \"max_score\" : 0.0,\n \"hits\" : [ ]\n },\n \"aggregations\" : {\n \"monthly_date_histogram\" : {\n \"buckets\" : [ {\n \"key_as_string\" : \"2016-01-01T00:00:00.000Z\",\n \"key\" : 1451606400000,\n \"doc_count\" : 0\n }, {\n \"key_as_string\" : \"2016-02-01T00:00:00.000Z\",\n \"key\" : 1454284800000,\n \"doc_count\" : 0\n }, {\n \"key_as_string\" : \"2016-03-01T00:00:00.000Z\",\n \"key\" : 1456790400000,\n \"doc_count\" : 0\n }, {\n \"key_as_string\" : \"2016-04-01T00:00:00.000Z\",\n \"key\" : 1459468800000,\n \"doc_count\" : 1\n }, {\n \"key_as_string\" : \"2016-05-01T00:00:00.000Z\",\n \"key\" : 1462060800000,\n \"doc_count\" : 1\n }, {\n \"key_as_string\" : \"2016-06-01T00:00:00.000Z\",\n \"key\" : 1464739200000,\n \"doc_count\" : 2\n } ]\n }\n }\n}\n```\n\nExcept this time, the aggregation over the alias works as expected: \n\n```\ncurl -XGET 
'http://localhost:9200/all_indices/_search?pretty' -d '{\n \"size\": 0,\n \"aggs\": \n {\"monthly_date_histogram\": \n {\"date_histogram\": {\"field\": \"when_recorded\", \n \"interval\": \"month\",\n \"min_doc_count\": 0,\n \"extended_bounds\": {\"max\": \"now\", \"min\": \"now-5M\"}}}}\n}\n'\n\n{\n \"took\" : 3,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 10,\n \"successful\" : 10,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : 8,\n \"max_score\" : 0.0,\n \"hits\" : [ ]\n },\n \"aggregations\" : {\n \"monthly_date_histogram\" : {\n \"buckets\" : [ {\n \"key_as_string\" : \"2016-01-01T00:00:00.000Z\",\n \"key\" : 1451606400000,\n \"doc_count\" : 0\n }, {\n \"key_as_string\" : \"2016-02-01T00:00:00.000Z\",\n \"key\" : 1454284800000,\n \"doc_count\" : 0\n }, {\n \"key_as_string\" : \"2016-03-01T00:00:00.000Z\",\n \"key\" : 1456790400000,\n \"doc_count\" : 0\n }, {\n \"key_as_string\" : \"2016-04-01T00:00:00.000Z\",\n \"key\" : 1459468800000,\n \"doc_count\" : 1\n }, {\n \"key_as_string\" : \"2016-05-01T00:00:00.000Z\",\n \"key\" : 1462060800000,\n \"doc_count\" : 1\n }, {\n \"key_as_string\" : \"2016-06-01T00:00:00.000Z\",\n \"key\" : 1464739200000,\n \"doc_count\" : 2\n } ]\n }\n }\n}\n```\n\nThe steps to reproduce are above, and I would expect the query against the alias to respect the `extended_bounds` parameter no matter what the index names are. \n", "comments": [ { "body": "Thanks for the clear recreation. Actually, I'd disagree with the output from `test_index_1` being correct. You've asked for extended bounds and yet you get no buckets back at all? I think all buckets should be returned instead.\n\n@colings86 could you take a look please?\n", "created_at": "2016-06-22T09:41:27Z" }, { "body": "Hmm, I haven't yet run your recreation @wrobstory (thanks for such a complete explanation/recreation btw) but it looks like the problem here is that the extended bounds information is not sent back to the coordinating node in the shard response if the shard had no matching documents. We arbitrarily pick one of the responses to use as the guide for the reduce phase (this is actually the first in the list and I wouldn't be surprised if the list is in fact sorted by index name first) so if we choose one which matched no documents it won't do the last step of completing the extended bounds (leading to the weird behaviour where the name of the index makes a difference to the result). In theory it should be an easy fix, to send back the extended bounds information as part of the empty aggregation response. I'll look into making this change soon.\n", "created_at": "2016-06-22T09:57:50Z" }, { "body": "@wrobstory I have raise #19085 to address this issue\n", "created_at": "2016-06-27T09:52:56Z" }, { "body": "Thanks for the great communication and quick turnaround! Much appreciated!\n", "created_at": "2016-06-28T15:22:50Z" } ], "number": 19009, "title": "date_histogram w/ extended_bounds fails on alias/index name" }
{ "body": "Previous to this change the unresolved extended bounds was passed into the histogram aggregator which meant extendedbounds.min and extendedbounds.max was passed through as null. This had two effects on the histogram aggregator:\n1. If the histogram aggregator was unmapped across all shards, the reduce phase would not add buckets for the extended bounds and the response would contain zero buckets\n2. If the histogram aggregator was not unmapped in some shards, the reduce phase might sometimes chose to reduce based on the unmapped shard response and therefore the extended bounds would be ignored.\n\nThis change resolves the extended bounds in the unmapped case and solves the above two issues.\n\nCloses #19009\n", "number": 19085, "review_comments": [], "title": "Pass resolved extended bounds to unmapped histogram aggregator" }
{ "commits": [ { "message": "Pass resolved extended bounds to unmapped histogram aggregator\n\nPrevious to this change the unresolved extended bounds was passed into the histogram aggregator which meant extendedbounds.min and extendedbounds.max was passed through as null. This had two effects on the histogram aggregator:\n\n1. If the histogram aggregator was unmapped across all shards, the reduce phase would not add buckets for the extended bounds and the response would contain zero buckets\n2. If the histogram aggregator was not unmapped in some shards, the reduce phase might sometimes chose to reduce based on the unmapped shard response and therefore the extended bounds would be ignored.\n\nThis change resolves the extended bounds in the unmapped case and solves the above two issues.\n\nCloses #19009" } ], "files": [ { "diff": "@@ -69,9 +69,7 @@ public long minDocCount() {\n @Override\n protected Aggregator createUnmapped(Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)\n throws IOException {\n- Rounding rounding = createRounding();\n- return new HistogramAggregator(name, factories, rounding, order, keyed, minDocCount, extendedBounds, null, config.format(),\n- histogramFactory, context, parent, pipelineAggregators, metaData);\n+ return createAggregator(null, parent, pipelineAggregators, metaData);\n }\n \n protected Rounding createRounding() {\n@@ -92,6 +90,11 @@ protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggrega\n if (collectsFromSingleBucket == false) {\n return asMultiBucketAggregator(this, context, parent);\n }\n+ return createAggregator(valuesSource, parent, pipelineAggregators, metaData);\n+ }\n+\n+ private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent, List<PipelineAggregator> pipelineAggregators,\n+ Map<String, Object> metaData) throws IOException {\n Rounding rounding = createRounding();\n // we need to round the bounds given by the user and we have to do it\n // for every aggregator we create", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramAggregatorFactory.java", "status": "modified" }, { "diff": "@@ -21,6 +21,7 @@\n import com.carrotsearch.hppc.LongHashSet;\n import org.elasticsearch.action.index.IndexRequestBuilder;\n import org.elasticsearch.action.search.SearchResponse;\n+import org.elasticsearch.index.query.QueryBuilders;\n import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.script.Script;\n import org.elasticsearch.script.groovy.GroovyPlugin;\n@@ -825,6 +826,83 @@ public void testSingleValuedFieldWithExtendedBounds() throws Exception {\n }\n }\n \n+ public void testEmptyWithExtendedBounds() throws Exception {\n+ int lastDataBucketKey = (numValueBuckets - 1) * interval;\n+\n+ // randomizing the number of buckets on the min bound\n+ // (can sometimes fall within the data range, but more frequently will fall before the data range)\n+ int addedBucketsLeft = randomIntBetween(0, numValueBuckets);\n+ long boundsMinKey = addedBucketsLeft * interval;\n+ if (frequently()) {\n+ boundsMinKey = -boundsMinKey;\n+ } else {\n+ addedBucketsLeft = 0;\n+ }\n+ long boundsMin = boundsMinKey + randomIntBetween(0, interval - 1);\n+\n+ // randomizing the number of buckets on the max bound\n+ // (can sometimes fall within the data range, but more frequently will fall after the data range)\n+ int addedBucketsRight = randomIntBetween(0, numValueBuckets);\n+ long boundsMaxKeyDelta = addedBucketsRight * interval;\n+ 
if (rarely()) {\n+ addedBucketsRight = 0;\n+ boundsMaxKeyDelta = -boundsMaxKeyDelta;\n+ }\n+ long boundsMaxKey = lastDataBucketKey + boundsMaxKeyDelta;\n+ long boundsMax = boundsMaxKey + randomIntBetween(0, interval - 1);\n+\n+\n+ // it could be that the random bounds.min we chose ended up greater than bounds.max - this should cause an\n+ // error\n+ boolean invalidBoundsError = boundsMin > boundsMax;\n+\n+ // constructing the newly expected bucket list\n+ int bucketsCount = numValueBuckets + addedBucketsLeft + addedBucketsRight;\n+ long[] extendedValueCounts = new long[bucketsCount];\n+ System.arraycopy(valueCounts, 0, extendedValueCounts, addedBucketsLeft, valueCounts.length);\n+\n+ SearchResponse response = null;\n+ try {\n+ response = client().prepareSearch(\"idx\")\n+ .setQuery(QueryBuilders.termQuery(\"foo\", \"bar\"))\n+ .addAggregation(histogram(\"histo\")\n+ .field(SINGLE_VALUED_FIELD_NAME)\n+ .interval(interval)\n+ .minDocCount(0)\n+ .extendedBounds(new ExtendedBounds(boundsMin, boundsMax)))\n+ .execute().actionGet();\n+\n+ if (invalidBoundsError) {\n+ fail(\"Expected an exception to be thrown when bounds.min is greater than bounds.max\");\n+ return;\n+ }\n+\n+ } catch (Exception e) {\n+ if (invalidBoundsError) {\n+ // expected\n+ return;\n+ } else {\n+ throw e;\n+ }\n+ }\n+ assertSearchResponse(response);\n+\n+ Histogram histo = response.getAggregations().get(\"histo\");\n+ assertThat(histo, notNullValue());\n+ assertThat(histo.getName(), equalTo(\"histo\"));\n+ List<? extends Bucket> buckets = histo.getBuckets();\n+ assertThat(buckets.size(), equalTo(bucketsCount));\n+\n+ long key = Math.min(boundsMinKey, 0);\n+ for (int i = 0; i < bucketsCount; i++) {\n+ Histogram.Bucket bucket = buckets.get(i);\n+ assertThat(bucket, notNullValue());\n+ assertThat(((Number) bucket.getKey()).longValue(), equalTo(key));\n+ assertThat(bucket.getDocCount(), equalTo(0L));\n+ key += interval;\n+ }\n+ }\n+\n /**\n * see issue #9634, negative interval in histogram should raise exception\n */", "filename": "modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/HistogramTests.java", "status": "modified" } ] }
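The reduce-phase behaviour described in the PR can be modelled without any Elasticsearch types. The toy reducer below (plain Java; all names are invented for illustration) merges per-shard bucket counts and then fills empty buckets across the resolved extended bounds, which is a simplified stand-in for the step that gets skipped when the chosen shard response carries null bounds.

```java
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class ToyHistogramReduce {

    /**
     * Merge per-shard bucket counts (bucket key -> doc count) and then add empty buckets so
     * the result covers [boundsMin, boundsMax] rounded down to the interval. If the bounds
     * are null (a stand-in for the unmapped-shard case above), no empty buckets are added
     * and the short response from issue #19009 appears.
     */
    static Map<Long, Long> reduce(List<Map<Long, Long>> shardBuckets, long interval, Long boundsMin, Long boundsMax) {
        Map<Long, Long> merged = new TreeMap<>();
        for (Map<Long, Long> shard : shardBuckets) {
            shard.forEach((key, count) -> merged.merge(key, count, Long::sum));
        }
        if (boundsMin != null && boundsMax != null) {
            long key = Math.floorDiv(boundsMin, interval) * interval;
            long lastKey = Math.floorDiv(boundsMax, interval) * interval;
            for (; key <= lastKey; key += interval) {
                merged.putIfAbsent(key, 0L);
            }
        }
        return merged;
    }

    public static void main(String[] args) {
        Map<Long, Long> mappedShard = new TreeMap<>(Map.of(20L, 3L, 30L, 1L));
        Map<Long, Long> unmappedShard = new TreeMap<>(); // shard with no values for the field
        List<Map<Long, Long>> shards = List.of(unmappedShard, mappedShard);

        // With resolved bounds every bucket from 0 to 50 is present, empty ones included.
        System.out.println(reduce(shards, 10, 0L, 50L));
        // With null bounds (the bug), only buckets that actually contain documents survive.
        System.out.println(reduce(shards, 10, null, null));
    }
}
```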
{ "body": "<!--\nGitHub is reserved for bug reports and feature requests. The best place\nto ask a general question is at the Elastic Discourse forums at\nhttps://discuss.elastic.co. If you are in fact posting a bug report or\na feature request, please include one and only one of the below blocks\nin your new issue.\n-->\n\n<!--\nIf you are filing a bug report, please remove the below feature\nrequest block and provide responses for all of the below items.\n-->\n\n**Environment**\n\nES 5.0.0-alpha3\nLinux/4.4.13-moby/amd64\nOracle Corporation/OpenJDK 64-Bit Server VM/1.8.0_91/25.91-b14\nrunning inside Docker.\n\n**Problem**\n\nI cannot issue \"human\" values for `timeout`any more, e.g. `timeout=1s`.\n\n**Steps to reproduce**:\n\nNotice the `\"timeout\":\"1s\"` here:\n\n```\n$ curl 'localhost:9200/_search' -d '{\"query\":{\"match_all\":{}},\"timeout\":\"1s\"}'\n{\"error\":{\"root_cause\":[{\"type\":\"number_format_exception\",\"reason\":\"For input string: \\\"1s\\\"\"}],\"type\":\"number_format_exception\",\"reason\":\"For input string: \\\"1s\\\"\"},\"status\":400}\n```\n\nNotice the `\"timeout\":\"1000\"` here (same with `1000`, without the quotes btw):\n\n```\n$ curl 'localhost:9200/_search' -d '{\"query\":{\"match_all\":{}},\"timeout\":\"1000\"}'\n{\"took\":1,\"timed_out\":false,\"_shards\":{\"total\":2,\"successful\":2,\"failed\":0},\"hits\":{\"total\":3,\"max_score\":1.0,\"hits\":[{\"_index\":\"elastic-test\",\"_type\":\"tweet\",\"_id\":\"1\",\"_score\":1.0,\"_timestamp\":1466863101867,\"_source\":{\"user\":\"olivere\",\"message\":\"Welcome to Golang and Elasticsearch.\",\"retweets\":108,\"created\":\"2012-12-12T17:38:34Z\"}},{\"_index\":\"elastic-test\",\"_type\":\"tweet\",\"_id\":\"2\",\"_score\":1.0,\"_timestamp\":1466863101879,\"_source\":{\"user\":\"olivere\",\"message\":\"Another unrelated topic.\",\"retweets\":0,\"created\":\"2012-10-10T08:12:03Z\"}},{\"_index\":\"elastic-test\",\"_type\":\"tweet\",\"_id\":\"3\",\"_score\":1.0,\"_timestamp\":1466863101882,\"_source\":{\"user\":\"sandrae\",\"message\":\"Cycling is fun.\",\"retweets\":12,\"created\":\"2011-11-11T10:58:12Z\"}}]}}\n```\n\nCannot find a deprecation notice. Is this expected behavior?\n", "comments": [ { "body": "I don't think it's expected. I would actually expect the opposite: reject time values without unit.\n", "created_at": "2016-06-25T16:01:44Z" }, { "body": "> Cannot find a deprecation notice. Is this expected behavior?\n\nThis is a mistake, thanks for reporting. We require units on time values, so the behavior is completely the opposite of the documented behavior and the behavior in the 2.x series of Elasticsearch. I have opened #19077 to fix. Thanks for reporting! I have marked you as eligible for the [Pioneer Program](https://www.elastic.co/blog/elastic-pioneer-program).\n", "created_at": "2016-06-25T18:15:27Z" }, { "body": "Closed by #19077\n", "created_at": "2016-06-25T20:18:58Z" } ], "number": 19075, "title": "Timeout values like `1s` result in number_format_exception" }
{ "body": "Today when parsing the timeout field in a query body, if time units are\nsupplied the parser throws a NumberFormatException. Addtionally, the\nparsing allows the timeout field to not specify units (it assumes\nmilliseconds). This commit fixes this behavior by not only allowing time\nunits to be specified but requires time units to be specified. This is\nconsistent with the documented behavior and the behavior in 2.x.\n\nCloses #19075\n", "number": 19077, "review_comments": [], "title": "Require timeout units when parsing query body" }
{ "commits": [ { "message": "Require timeout units when parsing query body\n\nToday when parsing the timeout field in a query body, if time units are\nsupplied the parser throws a NumberFormatException. Addtionally, the\nparsing allows the timeout field to not specify units (it assumes\nmilliseconds). This commit fixes this behavior by not only allowing time\nunits to be specified but requires time units to be specified. This is\nconsistent with the documented behavior and the behavior in 2.x." } ], "files": [ { "diff": "@@ -21,7 +21,6 @@\n \n import com.carrotsearch.hppc.ObjectFloatHashMap;\n import com.carrotsearch.hppc.cursors.ObjectCursor;\n-\n import org.elasticsearch.action.support.ToXContentToBytes;\n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.ParseField;\n@@ -42,7 +41,6 @@\n import org.elasticsearch.index.query.QueryShardContext;\n import org.elasticsearch.script.Script;\n import org.elasticsearch.search.aggregations.AggregationBuilder;\n-import org.elasticsearch.search.slice.SliceBuilder;\n import org.elasticsearch.search.aggregations.AggregatorFactories;\n import org.elasticsearch.search.aggregations.AggregatorParsers;\n import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;\n@@ -51,6 +49,7 @@\n import org.elasticsearch.search.internal.SearchContext;\n import org.elasticsearch.search.rescore.RescoreBuilder;\n import org.elasticsearch.search.searchafter.SearchAfterBuilder;\n+import org.elasticsearch.search.slice.SliceBuilder;\n import org.elasticsearch.search.sort.ScoreSortBuilder;\n import org.elasticsearch.search.sort.SortBuilder;\n import org.elasticsearch.search.sort.SortBuilders;\n@@ -959,7 +958,7 @@ public void parseXContent(QueryParseContext context, AggregatorParsers aggParser\n } else if (context.getParseFieldMatcher().match(currentFieldName, SIZE_FIELD)) {\n size = parser.intValue();\n } else if (context.getParseFieldMatcher().match(currentFieldName, TIMEOUT_FIELD)) {\n- timeoutInMillis = parser.longValue();\n+ timeoutInMillis = TimeValue.parseTimeValue(parser.text(), null, TIMEOUT_FIELD.getPreferredName()).millis();\n } else if (context.getParseFieldMatcher().match(currentFieldName, TERMINATE_AFTER_FIELD)) {\n terminateAfter = parser.intValue();\n } else if (context.getParseFieldMatcher().match(currentFieldName, MIN_SCORE_FIELD)) {\n@@ -1105,7 +1104,7 @@ public void innerToXContent(XContentBuilder builder, Params params) throws IOExc\n }\n \n if (timeoutInMillis != -1) {\n- builder.field(TIMEOUT_FIELD.getPreferredName(), timeoutInMillis);\n+ builder.field(TIMEOUT_FIELD.getPreferredName(), TimeValue.timeValueMillis(timeoutInMillis).toString());\n }\n \n if (terminateAfter != SearchContext.DEFAULT_TERMINATE_AFTER) {", "filename": "core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java", "status": "modified" }, { "diff": "@@ -19,6 +19,7 @@\n \n package org.elasticsearch.search.builder;\n \n+import org.elasticsearch.ElasticsearchParseException;\n import org.elasticsearch.Version;\n import org.elasticsearch.cluster.ClusterState;\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n@@ -89,7 +90,9 @@\n \n import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;\n import static org.elasticsearch.test.ClusterServiceUtils.setState;\n+import static org.hamcrest.CoreMatchers.containsString;\n import static org.hamcrest.Matchers.equalTo;\n+import static org.hamcrest.Matchers.hasToString;\n \n public class SearchSourceBuilderTests extends ESTestCase {\n private static Injector 
injector;\n@@ -593,6 +596,27 @@ public void testParseRescore() throws IOException {\n }\n }\n \n+ public void testTimeoutWithUnits() throws IOException {\n+ final String timeout = randomTimeValue();\n+ final String query = \"{ \\\"query\\\": { \\\"match_all\\\": {}}, \\\"timeout\\\": \\\"\" + timeout + \"\\\"}\";\n+ try (XContentParser parser = XContentFactory.xContent(query).createParser(query)) {\n+ final SearchSourceBuilder builder = SearchSourceBuilder.fromXContent(createParseContext(parser), aggParsers, suggesters);\n+ assertThat(builder.timeoutInMillis(), equalTo(TimeValue.parseTimeValue(timeout, null, \"timeout\").millis()));\n+ }\n+ }\n+\n+ public void testTimeoutWithoutUnits() throws IOException {\n+ final int timeout = randomIntBetween(1, 1024);\n+ final String query = \"{ \\\"query\\\": { \\\"match_all\\\": {}}, \\\"timeout\\\": \\\"\" + timeout + \"\\\"}\";\n+ try (XContentParser parser = XContentFactory.xContent(query).createParser(query)) {\n+ final ElasticsearchParseException e =\n+ expectThrows(\n+ ElasticsearchParseException.class,\n+ () -> SearchSourceBuilder.fromXContent(createParseContext(parser), aggParsers, suggesters));\n+ assertThat(e, hasToString(containsString(\"unit is missing or unrecognized\")));\n+ }\n+ }\n+\n public void testEmptyPostFilter() throws IOException {\n SearchSourceBuilder builder = new SearchSourceBuilder();\n String query = \"{ \\\"post_filter\\\": {} }\";", "filename": "core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java", "status": "modified" } ] }
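The behaviour restored by this PR, that time values must carry a unit, can be sketched without any Elasticsearch classes. The parser below is not ES's `TimeValue`; it is a toy stand-in showing why "1s" parses while a bare "1000" is rejected, reusing the "unit is missing or unrecognized" wording the new test asserts on.

```java
import java.util.Map;

public class ToyTimeValueParser {

    private static final Map<String, Long> MILLIS_PER_UNIT = Map.of(
            "ms", 1L,
            "s", 1_000L,
            "m", 60_000L,
            "h", 3_600_000L,
            "d", 86_400_000L);

    /** Parse values like "1s" or "500ms" into milliseconds; reject unit-less numbers. */
    static long parseMillis(String value) {
        String v = value.trim().toLowerCase();
        // Check "ms" before "s" so "500ms" is not split as the number "500m" plus unit "s".
        for (String unit : new String[] {"ms", "s", "m", "h", "d"}) {
            if (v.endsWith(unit)) {
                String number = v.substring(0, v.length() - unit.length()).trim();
                if (!number.isEmpty()) {
                    return Long.parseLong(number) * MILLIS_PER_UNIT.get(unit);
                }
            }
        }
        throw new IllegalArgumentException("failed to parse [" + value + "]: unit is missing or unrecognized");
    }

    public static void main(String[] args) {
        System.out.println(parseMillis("1s"));     // 1000
        System.out.println(parseMillis("500ms"));  // 500
        System.out.println(parseMillis("1000"));   // throws: unit is missing or unrecognized
    }
}
```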
{ "body": "I get the following error which takes down my es service:\n\n```\njava.lang.RuntimeException: Service or property not registered: com.microsoft.windowsazure.management.compute.ComputeManagementClient interface com.microsoft.windowsazure.management.compute.ComputeManagementClient\n at com.microsoft.windowsazure.core.DefaultBuilder.build(DefaultBuilder.java:197)\n at com.microsoft.windowsazure.Configuration.create(Configuration.java:113)\n at com.microsoft.windowsazure.management.compute.ComputeManagementService.create(ComputeManagementService.java:47)\n at org.elasticsearch.cloud.azure.management.AzureComputeServiceImpl.<init>(AzureComputeServiceImpl.java:83)\n at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)\n at sun.reflect.NativeConstructorAccessorImpl.newInstance(Unknown Source)\n at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(Unknown Source)\n at java.lang.reflect.Constructor.newInstance(Unknown Source)\n at <<<guice>>>\n at org.elasticsearch.node.Node.<init>(Node.java:198)\n at org.elasticsearch.node.NodeBuilder.build(NodeBuilder.java:145)\n at org.elasticsearch.bootstrap.Bootstrap.setup(Bootstrap.java:170)\n at org.elasticsearch.bootstrap.Bootstrap.init(Bootstrap.java:270)\n at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:35)\n```\n\nElasticsearch: 2.0\nJava: 1.8.0_66\nWindows Server 2012 R2 Datacenter\n", "comments": [ { "body": "Have tried with Java 7 to no avail\n", "created_at": "2015-12-23T12:46:36Z" }, { "body": "Can you share your settings?\n", "created_at": "2015-12-23T13:12:41Z" }, { "body": "my elasticsearch settings?\n\n```\ncluster.name: empactislive\n\nnetwork.bind_host: _en0_\nnetwork.publish_host: _en0_\n\ncloud.azure.management.keystore.path: E:\\azurekeystore.pkcs12\ncloud.azure.management.keystore.type: pkcs12\ncloud.azure.management.keystore.password: ***********\ncloud.azure.management.subscription.id: *******-****-****-****-********\ncloud.azure.management.cloud.service.name: empctslivees\ndiscovery.azure.deployment.name: empctsesvmlive1\ndiscovery.type: azure\n```\n", "created_at": "2015-12-23T13:41:59Z" }, { "body": "I get the same error with elasticsearch 2.1.1\n", "created_at": "2015-12-23T14:03:31Z" }, { "body": "Can you try \n\n```\n\\\\ \n```\n\ninstead of \n\n```\n \\\n```\n\nAnything else in logs?\n", "created_at": "2015-12-23T14:24:24Z" }, { "body": "I can't see anything else in the logs. 
\nI have tried changing the discovery logging level to trace but that does not give any more information\n", "created_at": "2015-12-23T15:12:32Z" }, { "body": "Tried both \n\n```\n\\\\\n```\n\nand \n\n```\n/\n```\n\nto no avail\n", "created_at": "2015-12-23T16:07:25Z" }, { "body": "I am also seeing a lot of this warning:\n\n```\nJava HotSpot(TM) 64-Bit Server VM warning: Using the ParNew young collector with the Serial old collector is deprecated and will likely be removed in a future release\nException in thread \"main\" puteManagementClient\n at com.microsoft.windowsazure.core.DefaultBuilder.build(DefaultBuilder.java:197)\n at com.microsoft.windowsazure.Configuration.create(Configuration.java:113)\n at com.microsoft.windowsazure.management.compute.ComputeManagementService.create(ComputeManagementService.java:47)\n at org.elasticsearch.cloud.azure.management.AzureComputeServiceImpl.<init>(AzureComputeServiceImpl.java:83)\n at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)\n at sun.reflect.NativeConstructorAccessorImpl.newInstance(Unknown Source)\n at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(Unknown Source)\n at java.lang.reflect.Constructor.newInstance(Unknown Source)\n at <<<guice>>>\n at org.elasticsearch.node.Node.<init>(Node.java:198)\n at org.elasticsearch.node.NodeBuilder.build(NodeBuilder.java:145)\n at org.elasticsearch.bootstrap.Bootstrap.setup(Bootstrap.java:170)\n at org.elasticsearch.bootstrap.Bootstrap.init(Bootstrap.java:270)\n at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:35)\n```\n", "created_at": "2015-12-23T16:08:11Z" }, { "body": "im getting the same issue with 2.1.1, and the same confguration as above, not sure if its to do with the fact i installed the 'cloud-azure' plugin instead of 'discovery-azure' as it says in the latest docs. discovery-azure gave me a file not found exception as the zip doesnt exist\n", "created_at": "2015-12-23T22:33:37Z" }, { "body": "@hisuwh Just to make sure about the source of your issue, can you move the pkcs12 file on the same drive as you have elasticsearch instance (I guess it could be for example `C:`) and then change your `elasticsearch.yml` accordingly:\n\n``` yaml\ncloud.azure.management.keystore.path: /azurekeystore.pkcs12\n```\n", "created_at": "2015-12-30T11:35:11Z" }, { "body": "I have been away for Christmas as you might expect. I will try this out at some point and let you know if this helps\n", "created_at": "2016-01-06T09:56:32Z" }, { "body": "I have tried moving my keystore to the c drive but this makes no difference\n", "created_at": "2016-01-12T16:50:16Z" }, { "body": "@hisuwh And your `elasticsearch.yml` is now?\n\n``` yml\ncloud.azure.management.keystore.path: /azurekeystore.pkcs12\n```\n", "created_at": "2016-01-12T16:55:56Z" }, { "body": "yes\n", "created_at": "2016-01-12T17:06:15Z" }, { "body": "I am sorry for the bump.\nBut any more ideas on this?\nThis massively blocking us at the moment.\nHas anyone your end tried to reproduce this? I don't know if its a bug with your tools or in my setup.\nI have the same setup on 3 other clusters except they are on older versions of elasticsearch and they all work.\n\nAny help would be much appreciated.\nThanks\n", "created_at": "2016-01-14T09:47:13Z" }, { "body": "I am having the same problem on elasticsearch 2.1.1 (running on ubuntu 15.04 installed from repositories). 
My config is as follows:\n\n``` yaml\ncluster.name: \"mycluster\"\nbootstrap.mlockall: true\npath:\n logs: /var/log/elasticsearch\n data: /datadrive/elasticsearch/data\ncloud:\n azure:\n management:\n keystore:\n path: /etc/elasticsearch/mykeystorefile.pkcs12\n password: password\n subscription.id: GUID\n cloud.service.name: myservicename\n storage:\n account: mystorageaccount\n key: mystorageaccountkey\ndiscovery:\n type: azure\nrefresh_interval: 30s\n```\n\nThis is what i have in the log:\n\n```\n[2016-01-20 11:51:36,846][WARN ][bootstrap ] If you are logged in interactively, you will have to re-login for the new limits to take effect.\n[2016-01-20 11:51:37,104][INFO ][node ] [Forgotten One] version[2.1.1], pid[17658], build[40e2c53/2015-12-15T13:05:55Z]\n[2016-01-20 11:51:37,104][INFO ][node ] [Forgotten One] initializing ...\n[2016-01-20 11:51:37,555][INFO ][plugins ] [Forgotten One] loaded [cloud-azure], sites [head]\n[2016-01-20 11:51:37,585][INFO ][env ] [Forgotten One] using [1] data paths, mounts [[/datadrive (/dev/sdc1)]], net usable_space [121.2gb], net total_space [127.8gb], spins? [possibly], types [ext4]\n[2016-01-20 11:51:41,140][ERROR][bootstrap ] Guice Exception: java.lang.RuntimeException: Service or property not registered: com.microsoft.windowsazure.management.compute.ComputeManagementClient interface com.microsoft.windowsazure.management.compute.ComputeManagementClient\n at com.microsoft.windowsazure.core.DefaultBuilder.build(DefaultBuilder.java:197)\n at com.microsoft.windowsazure.Configuration.create(Configuration.java:113)\n at com.microsoft.windowsazure.management.compute.ComputeManagementService.create(ComputeManagementService.java:47)\n at org.elasticsearch.cloud.azure.management.AzureComputeServiceImpl.<init>(AzureComputeServiceImpl.java:83)\n at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)\n at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)\n at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)\n at java.lang.reflect.Constructor.newInstance(Constructor.java:422)\n at <<<guice>>>\n at org.elasticsearch.node.Node.<init>(Node.java:200)\n at org.elasticsearch.node.Node.<init>(Node.java:128)\n at org.elasticsearch.node.NodeBuilder.build(NodeBuilder.java:145)\n at org.elasticsearch.bootstrap.Bootstrap.setup(Bootstrap.java:178)\n at org.elasticsearch.bootstrap.Bootstrap.init(Bootstrap.java:285)\n at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:35)\n```\n\nAny pointers will be greatly appreciated\n[Update] I've isolated the code for creating the ComputeManagementClient in a small application to verify that my settings are correct. It is ran on the same machine where the elasticsearch is installed and uses the same values as from my configuration. 
The client is successfully instantiated without throwing exceptions:\n\n``` java\nimport com.microsoft.windowsazure.Configuration;\nimport com.microsoft.windowsazure.core.utils.KeyStoreType;\nimport com.microsoft.windowsazure.management.compute.ComputeManagementClient;\nimport com.microsoft.windowsazure.management.compute.ComputeManagementService;\nimport com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse;\nimport com.microsoft.windowsazure.management.configuration.ManagementConfiguration;\nimport java.io.IOException;\nimport java.net.URI;\nimport java.net.URISyntaxException;\n\nclass azuretest {\n static final class Azure {\n private static final String ENDPOINT = \"https://management.core.windows.net/\";\n }\n public static void main(String[] args) {\n String subscriptionId = \"GUID\";\n String serviceName = \"mysubscriptionname\";\n String keystorePath = \"/etc/elasticsearch/mykeystorefile.pkcs12\";\n String keystorePassword = \"password\";\n Configuration configuration;\n try{\n configuration = ManagementConfiguration.configure(new URI(Azure.ENDPOINT),\n subscriptionId, keystorePath, keystorePassword, KeyStoreType.pkcs12);\n } catch (IOException|URISyntaxException e) {\n System.out.println(e.getMessage());\n return;\n }\n System.out.println(configuration.toString());\n ComputeManagementClient computeManagementClient = ComputeManagementService.create(configuration);\n System.out.println(computeManagementClient.getApiVersion());\n\n }\n}\n```\n\nThere is one difference though - here I am using Azure SDK version 0.9.0\n", "created_at": "2016-01-20T12:00:15Z" }, { "body": "@dadoonet - any more thoughts? We are currently running a 1 node cluster in live because of this which presents a pretty huge risk\n", "created_at": "2016-01-28T09:55:58Z" }, { "body": "@hisuwh I've worked around by disabling azure discovery and relying on unicast, by hardcoding the nodes.\n", "created_at": "2016-01-28T13:02:58Z" }, { "body": "Of course, that is a good point. Thanks\nGood workaround though by no means a solution\n", "created_at": "2016-01-28T13:10:02Z" }, { "body": "@gmarz are you familiar with the azure plugin? Is this something you could investigate?\n", "created_at": "2016-02-29T20:24:19Z" }, { "body": "@hisuwh @georgievrado Any chance you could turn on `TRACE` level? \n\nIn your `logging.yml`, add:\n\n``` yml\n cloud.azure: TRACE\n discovery.azure: TRACE\n```\n\nIt might give us some ideas about what is happening. Thanks!\n", "created_at": "2016-03-05T11:25:17Z" }, { "body": "This issue is closed in c557663b90dc3ac3f0ae7391cb6eac543812a946\n\nPlease note that since ES 2.0, the keystore used by the Azure plugin must be placed in a directory accessible by the elasticsearch process. I suggest to place it in the `config` directory.\n", "created_at": "2016-07-01T08:21:10Z" } ], "number": 15630, "title": "cloud-azure plugin not working " }
{ "body": "This `discovery-azure` plugin is broken since 2.x. This commit fixes the plugin so that it can work with the security manager and uses the right classloader when loading its Azure services.\n\nIt's just a fix because testing these things on Azure are very time consuming, but we **really really** need to automatically test this correctly. It's a shame it's been broken for so long.\n\nNote: 2.x fix is similar and will follow soon.\n\nRelated to #18637, #15630\n", "number": 19062, "review_comments": [ { "body": "Indeed. That's exactly what was blocking me in my branch. It makes sense to me to have that in config.\n", "created_at": "2016-06-24T15:43:45Z" }, { "body": "I'd write either \n\n``` java\nif (logger.isTraceEnabled()) {\n configuration.setProperty(Configuration.PROPERTY_LOG_HTTP_REQUESTS, true);\n}\n```\n\nOr\n\n``` java\nconfiguration.setProperty(Configuration.PROPERTY_LOG_HTTP_REQUESTS, logger.isTraceEnabled());\n```\n\nNo?\n", "created_at": "2016-06-24T15:45:47Z" }, { "body": "I think we can now remove this condition as the client can not be null because we throw now `new ElasticsearchException(\"Unable to configure Azure compute service\", e);` in the CTOR\n", "created_at": "2016-06-24T15:47:12Z" }, { "body": "Yes, it is a leftover.\n", "created_at": "2016-06-27T07:08:30Z" }, { "body": "yes\n", "created_at": "2016-06-27T07:09:04Z" } ], "title": "Make discovery-azure plugin work again" }
{ "commits": [ { "message": "Make discovery-azure work again\n\nThe discovery-plugin has been broken since 2.x because the code was not compliant with the security manager and because plugins have been refactored.\n\ncloses #18637, #15630" } ], "files": [ { "diff": "@@ -56,9 +56,11 @@ discovery:\n .Binding the network host\n ==============================================\n \n+WARNING: The keystore file must be placed in a directory accessible by elasticsearch like the `config` directory.\n+\n It's important to define `network.host` as by default it's bound to `localhost`.\n \n-You can use {ref}/modules-network.html[core network host settings]. For example `_non_loopback_` or `_en0_`.\n+You can use {ref}/modules-network.html[core network host settings]. For example `_en0_`.\n \n ==============================================\n ", "filename": "docs/plugins/discovery-azure.asciidoc", "status": "modified" }, { "diff": "@@ -20,27 +20,29 @@\n package org.elasticsearch.cloud.azure.management;\n \n import com.microsoft.windowsazure.Configuration;\n+import com.microsoft.windowsazure.core.Builder;\n+import com.microsoft.windowsazure.core.DefaultBuilder;\n import com.microsoft.windowsazure.core.utils.KeyStoreType;\n import com.microsoft.windowsazure.management.compute.ComputeManagementClient;\n import com.microsoft.windowsazure.management.compute.ComputeManagementService;\n import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse;\n import com.microsoft.windowsazure.management.configuration.ManagementConfiguration;\n import org.elasticsearch.ElasticsearchException;\n-import org.elasticsearch.cloud.azure.AzureServiceDisableException;\n import org.elasticsearch.cloud.azure.AzureServiceRemoteException;\n import org.elasticsearch.common.component.AbstractLifecycleComponent;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n \n import java.io.IOException;\n+import java.util.ServiceLoader;\n \n /**\n *\n */\n public class AzureComputeServiceImpl extends AbstractLifecycleComponent<AzureComputeServiceImpl>\n implements AzureComputeService {\n \n- private final ComputeManagementClient computeManagementClient;\n+ private final ComputeManagementClient client;\n private final String serviceName;\n \n @Inject\n@@ -54,28 +56,36 @@ public AzureComputeServiceImpl(Settings settings) {\n KeyStoreType keystoreType = Management.KEYSTORE_TYPE_SETTING.get(settings);\n \n logger.trace(\"creating new Azure client for [{}], [{}]\", subscriptionId, serviceName);\n- ComputeManagementClient result;\n try {\n- // Check that we have all needed properties\n- Configuration configuration = ManagementConfiguration.configure(Management.ENDPOINT_SETTING.get(settings),\n- subscriptionId, keystorePath, keystorePassword, keystoreType);\n- result = ComputeManagementService.create(configuration);\n+ // Azure SDK configuration uses DefaultBuilder which uses java.util.ServiceLoader to load the\n+ // various Azure services. By default, this will use the current thread's context classloader\n+ // to load services. 
Since the current thread refers to the main application classloader it\n+ // won't find any Azure service implementation.\n+\n+ // Here we basically create a new DefaultBuilder that uses the current class classloader to load services.\n+ DefaultBuilder builder = new DefaultBuilder();\n+ for (Builder.Exports exports : ServiceLoader.load(Builder.Exports.class, getClass().getClassLoader())) {\n+ exports.register(builder);\n+ }\n+\n+ // And create a new blank configuration based on the previous DefaultBuilder\n+ Configuration configuration = new Configuration(builder);\n+ configuration.setProperty(Configuration.PROPERTY_LOG_HTTP_REQUESTS, logger.isTraceEnabled());\n+\n+ Configuration managementConfig = ManagementConfiguration.configure(null, configuration,\n+ Management.ENDPOINT_SETTING.get(settings), subscriptionId, keystorePath, keystorePassword, keystoreType);\n+\n+ logger.debug(\"creating new Azure client for [{}], [{}]\", subscriptionId, serviceName);\n+ client = ComputeManagementService.create(managementConfig);\n } catch (IOException e) {\n- logger.error(\"can not start azure client: {}\", e.getMessage());\n- result = null;\n+ throw new ElasticsearchException(\"Unable to configure Azure compute service\", e);\n }\n- this.computeManagementClient = result;\n }\n \n @Override\n public HostedServiceGetDetailedResponse getServiceDetails() {\n- if (computeManagementClient == null) {\n- // Azure plugin is disabled\n- throw new AzureServiceDisableException(\"azure plugin is disabled.\");\n- }\n-\n try {\n- return computeManagementClient.getHostedServicesOperations().getDetailed(serviceName);\n+ return client.getHostedServicesOperations().getDetailed(serviceName);\n } catch (Exception e) {\n throw new AzureServiceRemoteException(\"can not get list of azure nodes\", e);\n }\n@@ -91,9 +101,9 @@ protected void doStop() throws ElasticsearchException {\n \n @Override\n protected void doClose() throws ElasticsearchException {\n- if (computeManagementClient != null) {\n+ if (client != null) {\n try {\n- computeManagementClient.close();\n+ client.close();\n } catch (IOException e) {\n logger.error(\"error while closing Azure client\", e);\n }", "filename": "plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java", "status": "modified" } ] }
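A recurring point in the discussion above is that the keystore must sit somewhere the elasticsearch process can actually read, such as the `config` directory. The snippet below is not part of the plugin; it is a small pre-flight check one could adapt to turn the late, opaque startup failure into an immediate, actionable error. The path in `main` is only an example.

```java
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class KeystorePathCheck {

    /**
     * Fail fast with an actionable message if the configured keystore cannot be read.
     * Per the warning added to the docs, the file should live somewhere the elasticsearch
     * process can read, for example the config directory.
     */
    static Path requireReadableKeystore(String configuredPath) {
        Path path = Paths.get(configuredPath);
        if (!Files.isRegularFile(path) || !Files.isReadable(path)) {
            throw new IllegalStateException("keystore [" + configuredPath
                    + "] is missing or not readable by the elasticsearch process;"
                    + " place it in the config directory and check file permissions");
        }
        return path;
    }

    public static void main(String[] args) {
        // Example path; replace with the value of cloud.azure.management.keystore.path.
        System.out.println(requireReadableKeystore("config/azurekeystore.pkcs12"));
    }
}
```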
{ "body": "Related to #18945 and to this https://github.com/elastic/elasticsearch/commit/35d3bdab84fa05c71e8ae019aaf661759c8b1622#commitcomment-17914150\n\nIn GCS Repository plugin we defined a `service_account` setting which is defined as `Property.Filtered`.\n\n``` java\npublic static final Setting<String> SERVICE_ACCOUNT = simpleString(\"service_account\", Property.NodeScope, Property.Dynamic, Property.Filtered);\n```\n\nBut as this setting is not registered when the plugin starts, it's not shaded when you ask for `GET _snapshot/gcsrepo`.\n\nWe should either remove `Filtered` if it does not make any sense to filter it or register the setting or wait for a fix for #18945.\n\nIf we want to keep it, we can add a REST test like this one: https://github.com/elastic/elasticsearch/blob/master/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yaml\n\n``` yaml\n# Integration tests for Repository GCS component\n#\n\"GCS repository can be registered\":\n - do:\n snapshot.create_repository:\n repository: test_repo_gcs_1\n verify: false\n body:\n type: gcs\n settings:\n service_account: \"whatever\"\n\n # Get repository\n - do:\n snapshot.get_repository:\n repository: test_repo_gcs_1\n\n - is_true: test_repo_gcs_1\n - is_false: test_repo_gcs_1.settings.service_account\n```\n", "comments": [ { "body": "Note: `service_account` point to the service account file which contains the necessary credentials to authenticate against Google Cloud Storage. The unfiltered value might show something like `service_account: /path/to/iam-googleaccount-whatever@google.com` but it is not sensitive data like the credentials.\n", "created_at": "2016-06-20T07:31:34Z" } ], "number": 18946, "title": "service_account is not filtered in GCS repository" }
{ "body": "Related to #18945 and to this https://github.com/elastic/elasticsearch/commit/35d3bdab84fa05c71e8ae019aaf661759c8b1622#commitcomment-17914150\n\nIn GCS Repository plugin we defined a `service_account` setting which is defined as `Property.Filtered`.\nIt's not needed as it's only a path to a file.\n\nCloses #18946\n", "number": 19058, "review_comments": [], "title": "Remove settings filtering for service_account in GCS repository" }
{ "commits": [ { "message": "Remove settings filtering for service_account in GCS repository\n\nRelated to #18945 and to this https://github.com/elastic/elasticsearch/commit/35d3bdab84fa05c71e8ae019aaf661759c8b1622#commitcomment-17914150\n\nIn GCS Repository plugin we defined a `service_account` setting which is defined as `Property.Filtered`.\nIt's not needed as it's only a path to a file.\n\nCloses #18946" } ], "files": [ { "diff": "@@ -62,7 +62,7 @@ public class GoogleCloudStorageRepository extends BlobStoreRepository {\n public static final Setting<String> APPLICATION_NAME =\n new Setting<>(\"application_name\", GoogleCloudStoragePlugin.NAME, Function.identity(), Property.NodeScope, Property.Dynamic);\n public static final Setting<String> SERVICE_ACCOUNT =\n- simpleString(\"service_account\", Property.NodeScope, Property.Dynamic, Property.Filtered);\n+ simpleString(\"service_account\", Property.NodeScope, Property.Dynamic);\n public static final Setting<TimeValue> HTTP_READ_TIMEOUT =\n timeSetting(\"http.read_timeout\", NO_TIMEOUT, Property.NodeScope, Property.Dynamic);\n public static final Setting<TimeValue> HTTP_CONNECT_TIMEOUT =", "filename": "plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java", "status": "modified" } ] }
{ "body": "**Elasticsearch version**:\n\n5.0.0~alpha3\n\n**JVM version**:\n\njava version \"1.8.0_91\"\nJava(TM) SE Runtime Environment (build 1.8.0_91-b14)\nJava HotSpot(TM) 64-Bit Server VM (build 25.91-b14, mixed mode)\n\n**OS version**:\n\nLinux ip-10-10-155-146 3.13.0-74-generic #118-Ubuntu SMP Thu Dec 17 22:52:10 UTC 2015 x86_64 x86_64 x86_64 GNU/Linux\n\n**Description of the problem including expected versus actual behavior**:\n\n**Steps to reproduce**:\nCreate an index with two mappings in a parent/child relationship. e.g.\n\n``` bash\n$ curl -XPUT 'http://localhost:9200/foo/' -d '{\"settings\":{\"number_of_shards\":1},\"mappings\":{\"blog\":{\"properties\":{\"name\":{\"type\":\"keyword\"}}},\"tag\":{\"_parent\":{\"type\":\"blog\"},\"properties\":{\"name\":{\"type\":\"keyword\"}}}}}'\n{\"acknowledged\":true}\n```\n\nMake this search query, with the `query` key preceding the `has_child` key in the `should` clause.\n\n``` bash\n$ curl 'http://localhost:9200/foo/_search' -d '{\"query\":{\"bool\":{\"should\":{\"query\":{\"query_string\":{\"query\":\"go\"}},\"has_child\":{\"query\":{\"query_string\":{\"query\":\"go\"}},\"child_type\":\"tag\"}}}}}'\n{\"error\":{\"root_cause\":[{\"type\":\"parsing_exception\",\"reason\":\"no [query] registered for [query]\",\"line\":1,\"col\":37}],\"type\":\"parsing_exception\",\"reason\":\"no [query] registered for [query]\",\"line\":1,\"col\":37},\"status\":400}\n```\n\nBut switching the key order makes it work. According to http://json.org/ this serialization represents the exact same object as before.\n\n``` bash\n$ curl 'http://localhost:9200/foo/_search' -d '{\"query\":{\"bool\":{\"should\":{\"has_child\":{\"query\":{\"query_string\":{\"query\":\"go\"}},\"child_type\":\"tag\"},\"query\":{\"query_string\":{\"query\":\"go\"}}}}}}'\n{\"took\":102,\"timed_out\":false,\"_shards\":{\"total\":1,\"successful\":1,\"failed\":0},\"hits\":{\"total\":0,\"max_score\":null,\"hits\":[]}}\n```\n\n**Provide logs (if relevant)**:\nNo logs appear to be generated for the error.\n", "comments": [ { "body": "Hi @nanotone \n\nThanks for trying out the alpha and for reporting this bug. The bug isn't what you think: the `query` query no longer exists, so it is actually the SECOND query which isn't failing correctly.\n", "created_at": "2016-06-23T11:49:10Z" }, { "body": "To reformulate the problem slightly, when querying something like:\n\n```\nGET /foo/_search\n{\n \"query\": {\n \"bool\": {\n \"must\": { \n \"unknown_query\" : {\n \"match\": {\n \"fuz\": \"buzz\"\n }\n }\n }\n }\n }\n}\n```\n\nwe correctly get the error stated above: `\"type\": \"parsing_exception\", \"reason\": \"no [query] registered for [unknown_query]\"`, but if we add a valid query before the invalid one, like the following, we don't get an error:\n\n```\nGET /foo/_search\n{\n \"query\": {\n \"bool\": {\n \"must\": {\n \"match\": {\n \"fizz\": \"bizz\"\n },\n \"unknown_query\" : \n {\n \"match\": {\n \"fuz\": \"buzz\"\n }\n }\n }\n }\n }\n}\n```\n", "created_at": "2016-06-23T13:02:03Z" }, { "body": "@clintongormley shouldn't more than one query clauses be specified as an array, or is `\"must\" : { \"query1\" : { ... }, \"query2\" : { ... } }` supported? Because if it is, I think that's whats not working.\n", "created_at": "2016-06-23T14:11:29Z" }, { "body": "@cbuescher yes absolutely right, so it's just ignoring whatever is left in that object\n", "created_at": "2016-06-23T14:13:25Z" } ], "number": 19034, "title": "Specific JSON key ordering causes search query failure" }
{ "body": "Currently we don't throw an error when there is more than one query clause specified in a must/must_not/should/filter object of the bool query without\nusing array notation, e.g.: \n\n```\n {\n \"bool\": {\n \"must\": {\n \"match\" : { ... },\n \"match\" : { ... }\n }\n }\n}\n```\n\nIn these cases, only the first query will be parsed correctly, possibly leading to silently ignoring the rest of the query.\nInstead we should throw a ParsingException if we don't encounter an END_OBJECT token after having parsed the query clause.\n\nCloses #19034\n", "number": 19052, "review_comments": [], "title": "Make parsing of bool queries stricter" }
{ "commits": [ { "message": "Make parsing of bool queries stricter\n\nCurrently we don't throw an error when there is more than one query clause\nspecified in a must/must_not/should/filter object of the bool query without\nusing array notation, e.g.:\n { \"bool\" : { \"must\" : { \"match\" : { ... }, \"match\": { ... }}}}\n\nIn these cases, only the first query will be parsed and further behaviour is\nunspecified, possibly leading to silently ignoring the rest of the query.\nInstead we should throw a ParsingException if we don't encounter an END_OBJECT\ntoken after having parsed the query clause." } ], "files": [ { "diff": "@@ -340,6 +340,10 @@ public static Optional<BoolQueryBuilder> fromXContent(QueryParseContext parseCon\n default:\n throw new ParsingException(parser.getTokenLocation(), \"[bool] query does not support [\" + currentFieldName + \"]\");\n }\n+ if (parser.currentToken() != XContentParser.Token.END_OBJECT) {\n+ throw new ParsingException(parser.getTokenLocation(),\n+ \"expected [END_OBJECT] but got [{}], possibly too many query clauses\", parser.currentToken());\n+ }\n } else if (token == XContentParser.Token.START_ARRAY) {\n while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {\n switch (currentFieldName) {", "filename": "core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java", "status": "modified" }, { "diff": "@@ -25,6 +25,7 @@\n import org.apache.lucene.search.MatchAllDocsQuery;\n import org.apache.lucene.search.Query;\n import org.elasticsearch.common.ParseFieldMatcher;\n+import org.elasticsearch.common.ParsingException;\n import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n@@ -370,6 +371,28 @@ public void testFromJsonEmptyQueryBody() throws IOException {\n assertThat(ex.getMessage(), startsWith(\"query malformed, empty clause found at\"));\n }\n \n+ /**\n+ * test that unknown query names in the clauses throw an error\n+ */\n+ public void testUnknownQueryName() throws IOException {\n+ String query = \"{\\\"bool\\\" : {\\\"must\\\" : { \\\"unknown_query\\\" : { } } } }\";\n+\n+ ParsingException ex = expectThrows(ParsingException.class, () -> parseQuery(query, ParseFieldMatcher.EMPTY));\n+ assertEquals(\"no [query] registered for [unknown_query]\", ex.getMessage());\n+ }\n+\n+ /**\n+ * test that two queries in object throws error\n+ */\n+ public void testTooManyQueriesInObject() throws IOException {\n+ String clauseType = randomFrom(new String[] {\"must\", \"should\", \"must_not\", \"filter\"});\n+ // should also throw error if invalid query is preceded by a valid one\n+ String query = \"{\\\"bool\\\" : {\\\"\" + clauseType\n+ + \"\\\" : { \\\"match\\\" : { \\\"foo\\\" : \\\"bar\\\" } , \\\"match\\\" : { \\\"baz\\\" : \\\"buzz\\\" } } } }\";\n+ ParsingException ex = expectThrows(ParsingException.class, () -> parseQuery(query, ParseFieldMatcher.EMPTY));\n+ assertEquals(\"expected [END_OBJECT] but got [FIELD_NAME], possibly too many query clauses\", ex.getMessage());\n+ }\n+\n public void testRewrite() throws IOException {\n BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();\n boolean mustRewrite = false;", "filename": "core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java", "status": "modified" } ] }
{ "body": "When doing a `date_histogram` aggregation with `\"format\":\"epoch_millis\"`, the aggregation response key shows the correct value as UTC time stamp, but the `key_as_string` does not reflect that key if a `time_zone` other than UTC is used. Reproduced on 2.3 and master:\n\n```\nPUT /foo\n\nPUT /foo/_mapping/bar\n{\n \"properties\":{\n \"timestamp\":{\n \"type\":\"date\",\n \"format\":\"epoch_millis\"\n }\n }\n}\n\nPOST /foo/bar/1\n{\n \"timestamp\": 1463875200000\n}\n\nPOST /foo/bar/_search\n{\n \"aggs\": {\n \"ts\": {\n \"date_histogram\": {\n \"field\": \"timestamp\",\n \"interval\": \"month\",\n \"time_zone\": \"+01:00\"\n }\n }\n }\n}\n```\n\nThe aggregation in the response:\n\n```\n\"aggregations\": {\n \"ts\": {\n \"buckets\": [\n {\n \"key_as_string\": \"1462060800000\",\n \"key\": 1462057200000,\n \"doc_count\": 1\n }\n ]\n }\n }\n```\n\nThe key correctly represents the date `2016-05-01T00:00:00.000+01:00`, the expectation would for `epoch_millis` format that the `key_as_string` is the same value.\nSame problem happens for `epoch_second` format. With other formats like `date_optional_time` or custom formats `key_as_string` changes correctly according to the keys value.\n", "comments": [], "number": 19038, "title": "Incorrect 'key_as_string' response in date_histogram when using 'epoch_millis' and 'time_zone'" }
{ "body": "When doing a `date_histogram` aggregation with `\"format\":\"epoch_millis\"` or `\"format\" : \"epoch_second\"` and using a time zone other than UTC, the `key_as_string` ouput in the response does not reflect the UTC timestamp that is used as the key. This happens because when applying the `time_zone` in DocValueFormat.DateTime to an epoch-based formatter, this adds the time zone offset to the value being formated. Instead we should adjust the added display offset to get back the utc instance in EpochTimePrinter.\n\nCloses #19038\n", "number": 19043, "review_comments": [ { "body": "do we really need a special case here or could we fix the printer impl for these formats to apply the timezone when printing a date?\n", "created_at": "2016-06-23T21:08:50Z" }, { "body": "I looked into this some more. It's problematic, the actual joda EpochPrinter prints whatever time instant (long) is given to it, the time zone adjustment happens in joda DateTimeFormatter.printTo(Appendable, long, Chronology), and if we use `withZone()` (which we need for other format to work correct) the time zone offset gets added. I don't see another way around this atm. but am open to suggestions where to dig further if you have ideas.\n", "created_at": "2016-06-24T11:40:30Z" }, { "body": "I'm wondering that this might be something that should be fixed upstream?\n", "created_at": "2016-06-27T12:44:53Z" }, { "body": "Sorry, I was mistaken when I thought EpochTimePrinter is part of Joda, its in our code base and we can fix the problem there. I will rebase and update this PR.\n", "created_at": "2016-06-28T10:40:13Z" }, { "body": "maybe leave a comment where this comes from?\n", "created_at": "2016-06-28T14:01:45Z" } ], "title": "Fix \"key_as_string\" for date histogram and epoch_millis/epoch_second format with time zone" }
{ "commits": [ { "message": "Fix key_as_string for date histogram and epoch_millis/epoch_second format\n\nWhen doing a `date_histogram` aggregation with `\"format\":\"epoch_millis\"` or\n`\"format\" : \"epoch_second\"` and using a time zone other than UTC, the\n`key_as_string` ouput in the response does not reflect the UTC timestamp that is\nused as the key. This happens because when applying the `time_zone` in\nDocValueFormat.DateTime to an epoch-based formatter, this adds the time zone\noffset to the value being formated. Instead we should adjust the added display\noffset to get back the utc instance in EpochTimePrinter.\n\nCloses #19038" } ], "files": [ { "diff": "@@ -43,7 +43,6 @@\n import java.io.IOException;\n import java.io.Writer;\n import java.util.Locale;\n-import java.util.regex.Pattern;\n \n /**\n *\n@@ -375,21 +374,30 @@ public int estimatePrintedLength() {\n return hasMilliSecondPrecision ? 19 : 16;\n }\n \n+\n+ /**\n+ * We adjust the instant by displayOffset to adjust for the offset that might have been added in\n+ * {@link DateTimeFormatter#printTo(Appendable, long, Chronology)} when using a time zone.\n+ */\n @Override\n public void printTo(StringBuffer buf, long instant, Chronology chrono, int displayOffset, DateTimeZone displayZone, Locale locale) {\n if (hasMilliSecondPrecision) {\n- buf.append(instant);\n+ buf.append(instant - displayOffset);\n } else {\n- buf.append(instant / 1000);\n+ buf.append((instant - displayOffset) / 1000);\n }\n }\n \n+ /**\n+ * We adjust the instant by displayOffset to adjust for the offset that might have been added in\n+ * {@link DateTimeFormatter#printTo(Appendable, long, Chronology)} when using a time zone.\n+ */\n @Override\n public void printTo(Writer out, long instant, Chronology chrono, int displayOffset, DateTimeZone displayZone, Locale locale) throws IOException {\n if (hasMilliSecondPrecision) {\n- out.write(String.valueOf(instant));\n+ out.write(String.valueOf(instant - displayOffset));\n } else {\n- out.append(String.valueOf(instant / 1000));\n+ out.append(String.valueOf((instant - displayOffset) / 1000));\n }\n }\n ", "filename": "core/src/main/java/org/elasticsearch/common/joda/Joda.java", "status": "modified" }, { "diff": "@@ -115,6 +115,7 @@ public double parseDouble(String value, boolean roundUp, Callable<Long> now) {\n return Double.parseDouble(value);\n }\n \n+ @Override\n public BytesRef parseBytesRef(String value) {\n return new BytesRef(value);\n }", "filename": "core/src/main/java/org/elasticsearch/search/DocValueFormat.java", "status": "modified" }, { "diff": "@@ -47,6 +47,7 @@\n import java.util.Arrays;\n import java.util.Collection;\n import java.util.HashMap;\n+import java.util.Iterator;\n import java.util.List;\n import java.util.Map;\n import java.util.concurrent.Callable;\n@@ -237,6 +238,46 @@ public void testSingleValuedFieldWithTimeZone() throws Exception {\n assertThat(bucket.getDocCount(), equalTo(1L));\n }\n \n+ public void testSingleValued_timeZone_epoch() throws Exception {\n+ String format = randomBoolean() ? \"epoch_millis\" : \"epoch_second\";\n+ int millisDivider = format.equals(\"epoch_millis\") ? 
1 : 1000;\n+ if (randomBoolean()) {\n+ format = format + \"||date_optional_time\";\n+ }\n+ DateTimeZone tz = DateTimeZone.forID(\"+01:00\");\n+ SearchResponse response = client().prepareSearch(\"idx\")\n+ .addAggregation(dateHistogram(\"histo\").field(\"date\")\n+ .dateHistogramInterval(DateHistogramInterval.DAY).minDocCount(1)\n+ .timeZone(tz).format(format))\n+ .execute()\n+ .actionGet();\n+ assertSearchResponse(response);\n+\n+ Histogram histo = response.getAggregations().get(\"histo\");\n+ assertThat(histo, notNullValue());\n+ assertThat(histo.getName(), equalTo(\"histo\"));\n+ List<? extends Bucket> buckets = histo.getBuckets();\n+ assertThat(buckets.size(), equalTo(6));\n+\n+ List<DateTime> expectedKeys = new ArrayList<>();\n+ expectedKeys.add(new DateTime(2012, 1, 1, 23, 0, DateTimeZone.UTC));\n+ expectedKeys.add(new DateTime(2012, 2, 1, 23, 0, DateTimeZone.UTC));\n+ expectedKeys.add(new DateTime(2012, 2, 14, 23, 0, DateTimeZone.UTC));\n+ expectedKeys.add(new DateTime(2012, 3, 1, 23, 0, DateTimeZone.UTC));\n+ expectedKeys.add(new DateTime(2012, 3, 14, 23, 0, DateTimeZone.UTC));\n+ expectedKeys.add(new DateTime(2012, 3, 22, 23, 0, DateTimeZone.UTC));\n+\n+\n+ Iterator<DateTime> keyIterator = expectedKeys.iterator();\n+ for (Histogram.Bucket bucket : buckets) {\n+ assertThat(bucket, notNullValue());\n+ DateTime expectedKey = keyIterator.next();\n+ assertThat(bucket.getKeyAsString(), equalTo(Long.toString(expectedKey.getMillis() / millisDivider)));\n+ assertThat(((DateTime) bucket.getKey()), equalTo(expectedKey));\n+ assertThat(bucket.getDocCount(), equalTo(1L));\n+ }\n+ }\n+\n public void testSingleValuedFieldOrderedByKeyAsc() throws Exception {\n SearchResponse response = client().prepareSearch(\"idx\")\n .addAggregation(dateHistogram(\"histo\")", "filename": "core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java", "status": "modified" } ] }
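To illustrate the fix in the record above: with `epoch_millis` (or `epoch_second`) and a non-UTC `time_zone`, `key_as_string` is now expected to print the same UTC instant as `key`. Re-running the reproduction from the issue (a single document at `1463875200000`, monthly buckets, `time_zone: "+01:00"`) should now return a bucket along these lines:

```
"buckets": [
  {
    "key_as_string": "1462057200000",
    "key": 1462057200000,
    "doc_count": 1
  }
]
```

In other words, the display offset added for the time zone is subtracted again before the epoch value is printed, so the string form no longer disagrees with the numeric key.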
{ "body": "When trying to upgrade from alpha3 to a more recent (snapshot) version\n\n**Elasticsearch version**: 5.0.0-alpha3\n\n**OS version**: Centos 7\n\n**Steps to reproduce**:\n\n```\nrpm -i elasticsearch-5.0.0-alpha3.rpm\nsystemctl daemon-reload\nsystemctl start elasticsearch\n# use journalctl -f to check that alpha3 has started, msut be aborted\n journalctl -f -n 40\n\nrpm -Uvh elasticsearch-5.0.0-alpha4.rpm\nsystemctl daemon-reload\nsystemctl restart elasticsearch\n```\n\nOutput of journalctl\n\n```\nJun 22 12:44:56 localhost.localdomain elasticsearch[31947]: Exception in thread \"main\" java.lang.IllegalStateException: Unable to access 'path.scripts' (/etc/elasticsearch/scripts)\n```\n\nWhen installing alpha3 first, then `/etc/elasticsearch/scripts` is not created, neither it is on upgrade to alpha4. However, there are additional problems when creating that directory\n\n```\nmkdir /etc/elasticsearch/scripts\nsystemctl start elasticsearch\njournalctl -f -n 40\n```\n\noutput is\n\n```\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: [2016-06-22 12:49:01,953][INFO ][node ] [Hank McCoy] version[5.0.0-alpha4], pid[31996], build[b0da471/2016-06-22T12:33:48.164Z], OS[Linux/3.10.0-327.18.2.el7.x86_64/amd64], JVM[Oracle Corporation/OpenJDK 64-Bit Server VM/1.8.0_91/25.91-b14]\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: [2016-06-22 12:49:01,954][INFO ][node ] [Hank McCoy] initializing ...\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: Exception in thread \"main\" java.lang.IllegalStateException: Unable to initialize modules\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: Likely root cause: java.nio.file.NoSuchFileException: /usr/share/elasticsearch/modules/ingest-grok/plugin-descriptor.properties\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at sun.nio.fs.UnixException.translateToIOException(UnixException.java:86)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:102)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:107)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at sun.nio.fs.UnixFileSystemProvider.newByteChannel(UnixFileSystemProvider.java:214)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at java.nio.file.Files.newByteChannel(Files.java:361)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at java.nio.file.Files.newByteChannel(Files.java:407)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at java.nio.file.spi.FileSystemProvider.newInputStream(FileSystemProvider.java:384)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at java.nio.file.Files.newInputStream(Files.java:152)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at org.elasticsearch.plugins.PluginInfo.readFromProperties(PluginInfo.java:74)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at org.elasticsearch.plugins.PluginsService.getModuleBundles(PluginsService.java:327)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at org.elasticsearch.plugins.PluginsService.<init>(PluginsService.java:131)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at org.elasticsearch.node.Node.<init>(Node.java:211)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at org.elasticsearch.node.Node.<init>(Node.java:172)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at 
org.elasticsearch.bootstrap.Bootstrap$5.<init>(Bootstrap.java:175)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at org.elasticsearch.bootstrap.Bootstrap.setup(Bootstrap.java:175)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at org.elasticsearch.bootstrap.Bootstrap.init(Bootstrap.java:250)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at org.elasticsearch.bootstrap.Elasticsearch.init(Elasticsearch.java:96)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at org.elasticsearch.bootstrap.Elasticsearch.execute(Elasticsearch.java:91)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at org.elasticsearch.cli.SettingCommand.execute(SettingCommand.java:54)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at org.elasticsearch.cli.Command.mainWithoutErrorHandling(Command.java:91)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at org.elasticsearch.cli.Command.main(Command.java:53)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:70)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:63)\nJun 22 12:49:01 localhost.localdomain elasticsearch[31996]: Refer to the log for complete error details.\n```\n\nThe issue here seems to be that the `ingest-grok` module does not exist after alpha3 anymore (has been moved into `ingest-common`, but it was not cleaned up properly on upgrade.\n\nIf we decide that upgrading from alpha3 to newer alpha version does not need to work, than the last part of this can potentially be ignored, however we still need to ensure that upgrading from any older (non alpha) elasticsearch version to a newer one works.\n", "comments": [ { "body": "> If we decide that upgrading from alpha3 to newer alpha version does not need to work, than the last part of this can potentially be ignored, however we still need to ensure that upgrading from any older (non alpha) elasticsearch version to a newer one works.\n\nI don't think we need to support upgrading between pre-releses. The packaging tests [do test upgrading from the previous major version](https://github.com/elastic/elasticsearch/blob/6671c0cf09ffe9dbc31936b25524df88d29612ed/qa/vagrant/src/test/resources/packaging/scripts/80_upgrade.bats). Right now they test upgrading from 2.0.0; I will open a PR to move this to 2.3.3.\n", "created_at": "2016-06-22T17:01:22Z" }, { "body": "I opened #19029.\n", "created_at": "2016-06-22T17:19:26Z" }, { "body": "IMO we should still ensure that uninstalling an RPM cleans up `/usr/share/elasticsearch/modules`\n\nOtherwise this just postpones the problem until an official release uses different modules.\n", "created_at": "2016-06-23T06:59:00Z" }, { "body": "> IMO we should still ensure that uninstalling an RPM cleans up /usr/share/elasticsearch/modules\n> Otherwise this just postpones the problem until an official release uses different modules.\n\nAgreed!\n", "created_at": "2016-06-23T11:56:10Z" }, { "body": "Our packaging tests in CI now cover upgrades from all eligible versions, I think that we can close this one out.", "created_at": "2018-02-21T15:37:55Z" } ], "number": 19026, "title": "Packaging: Upgrading from alpha3 using RPM does not work" }
{ "body": "This commit moves the upgrade test to test upgrading from version 2.3.3\ninstead of from version 2.0.0.\n\nCloses #19026\n", "number": 19029, "review_comments": [], "title": "Move upgrade test to upgrade from version 2.3.3" }
{ "commits": [ { "message": "Move upgrade test to upgrade from version 2.3.3\n\nThis commit moves the upgrade test to test upgrading from version 2.3.3\ninstead of from version 2.0.0." } ], "files": [ { "diff": "@@ -58,7 +58,7 @@ for (String box : getProperties().get('vagrant.boxes', 'sample').split(',')) {\n \n /* The version of elasticsearch that we upgrade *from* as part of testing\n * upgrades. */\n-String upgradeFromVersion = '2.0.0'\n+String upgradeFromVersion = '2.3.3'\n \n configurations {\n test", "filename": "qa/vagrant/build.gradle", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: \n5.0 A3\n\n**JVM version**: \n1.8.0_91\n\n**OS version**:\nCentOS 7\n\n**Description of the problem including expected versus actual behavior**:\nOn 5.0 A2 the automatic upgrade https://github.com/elastic/elasticsearch/pull/17861 was working fine for a mapped string using an analyzer (Path Hierarchy Tokenizer), after upgrade to 5.0 A3 it didn't work, so I think we have a regression at that point.\n\n**Steps to reproduce**:\n1. Define an analyser for a path_hierarchy\n\n\"analyzer\": {\n \"paths\": {\n \"tokenizer\": \"path_hierarchy\"\n }\n1. Set it for an analyzed string on mapping \n\n**Provide logs (if relevant)**:\n", "comments": [ { "body": "I tried the following locally on a fresh download of 5.0 alpha3\n\n```\nPUT test\n{\n \"settings\": {\n \"analysis\": {\n \"analyzer\": {\n \"path_analyzer\": {\n \"type\": \"custom\",\n \"tokenizer\": \"path_tokenizer\"\n }\n },\n \"tokenizer\": {\n \"path_tokenizer\": {\n \"type\": \"path_hierarchy\"\n }\n }\n }\n },\n \"mappings\": {\n \"test\": {\n \"properties\": {\n \"foo\": {\n \"type\": \"string\",\n \"analyzer\": \"path_analyzer\"\n }\n }\n }\n }\n}\n\nGET test/_mapping\n```\n\nwhich returned\n\n```\n{\n \"test\": {\n \"mappings\": {\n \"test\": {\n \"properties\": {\n \"foo\": {\n \"type\": \"text\",\n \"analyzer\": \"path_analyzer\"\n }\n }\n }\n }\n }\n}\n```\n\nThis looks correct to me. Could you provide a recreation for the issue that you are seeing?\n", "created_at": "2016-06-20T11:00:16Z" }, { "body": "@jpountz I've tried to reproduce on my environment and it didn't happen anymore. The only difference was that I updated from Alpha 2 to Aplha3 using RPM. I've tried to do it again but the error didn't happen. I'll close. Thanks.\n", "created_at": "2016-06-20T13:15:03Z" }, { "body": "@jpountz, I reproduced the error with a fresh Alpha 3, see the \"bar\" definition.\n\n```\n{\n \"settings\": {\n \"analysis\": {\n \"analyzer\": {\n \"path_analyzer\": {\n \"type\": \"custom\",\n \"tokenizer\": \"path_tokenizer\"\n }\n },\n \"tokenizer\": {\n \"path_tokenizer\": {\n \"type\": \"path_hierarchy\"\n }\n }\n }\n },\n \"mappings\": {\n \"test\": {\n \"properties\": {\n \"foo\": {\n \"type\": \"string\",\n \"analyzer\": \"path_analyzer\"\n },\n \"bar\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\",\n \"fields\": {\n \"tree\": {\n \"type\": \"string\",\n \"analyzer\": \"path_analyzer\"\n }\n },\n \"include_in_all\": false,\n \"fielddata\": false\n }\n }\n }\n }\n}\n```\n", "created_at": "2016-06-20T22:05:35Z" }, { "body": "Actually, this is just to do with `include_in_all`:\n\n```\nPUT t\n{\n \"mappings\": {\n \"test\": {\n \"properties\": {\n \"foo\": {\n \"type\": \"string\",\n \"include_in_all\": false\n }\n }\n }\n }\n}\n```\n", "created_at": "2016-06-21T09:28:28Z" }, { "body": "OK, so we need to add `include_in_all` to the whitelist of parameters we automatically upgrade.\n", "created_at": "2016-06-21T12:51:40Z" } ], "number": 18974, "title": "Possible Regression on 5.0A3 for string to text conversion" }
{ "body": "Closes #18974\n", "number": 19004, "review_comments": [], "title": "Upgrade `string` fields to `text`/`keyword` even if `include_in_all` is set." }
{ "commits": [ { "message": "Upgrade `string` fields to `text`/`keyword` even if `include_in_all` is set. #19004\n\nCloses #18974" } ], "files": [ { "diff": "@@ -192,6 +192,11 @@ protected KeywordFieldMapper clone() {\n return (KeywordFieldMapper) super.clone();\n }\n \n+ // pkg-private for testing\n+ Boolean includeInAll() {\n+ return includeInAll;\n+ }\n+\n @Override\n public KeywordFieldMapper includeInAll(Boolean includeInAll) {\n if (includeInAll != null) {", "filename": "core/src/main/java/org/elasticsearch/index/mapper/core/KeywordFieldMapper.java", "status": "modified" }, { "diff": "@@ -73,12 +73,12 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc\n \"type\",\n // common keyword parameters, for which the upgrade is straightforward\n \"index\", \"store\", \"doc_values\", \"omit_norms\", \"norms\", \"fields\", \"copy_to\",\n- \"fielddata\", \"ignore_above\"));\n+ \"fielddata\", \"include_in_all\", \"ignore_above\"));\n private static final Set<String> SUPPORTED_PARAMETERS_FOR_AUTO_UPGRADE_TO_TEXT = new HashSet<>(Arrays.asList(\n \"type\",\n // common text parameters, for which the upgrade is straightforward\n \"index\", \"store\", \"doc_values\", \"omit_norms\", \"norms\", \"fields\", \"copy_to\",\n- \"fielddata\", \"analyzer\", \"search_analyzer\", \"search_quote_analyzer\"));\n+ \"fielddata\", \"include_in_all\", \"analyzer\", \"search_analyzer\", \"search_quote_analyzer\"));\n \n public static class Defaults {\n public static double FIELDDATA_MIN_FREQUENCY = 0;", "filename": "core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java", "status": "modified" }, { "diff": "@@ -317,6 +317,11 @@ protected TextFieldMapper clone() {\n return (TextFieldMapper) super.clone();\n }\n \n+ // pkg-private for testing\n+ Boolean includeInAll() {\n+ return includeInAll;\n+ }\n+\n @Override\n public TextFieldMapper includeInAll(Boolean includeInAll) {\n if (includeInAll != null) {", "filename": "core/src/main/java/org/elasticsearch/index/mapper/core/TextFieldMapper.java", "status": "modified" }, { "diff": "@@ -198,6 +198,32 @@ public void testUpgradeAnalyzer() throws IOException {\n assertEquals(\"keyword\", field.fieldType().searchQuoteAnalyzer().name());\n }\n \n+ public void testUpgradeTextIncludeInAll() throws IOException {\n+ IndexService indexService = createIndex(\"test\");\n+ DocumentMapperParser parser = indexService.mapperService().documentMapperParser();\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\").startObject(\"field\").field(\"type\", \"string\")\n+ .field(\"include_in_all\", false).endObject().endObject()\n+ .endObject().endObject().string();\n+ DocumentMapper mapper = parser.parse(\"type\", new CompressedXContent(mapping));\n+ FieldMapper field = mapper.mappers().getMapper(\"field\");\n+ assertThat(field, instanceOf(TextFieldMapper.class));\n+ assertFalse(((TextFieldMapper) field).includeInAll());\n+ }\n+\n+ public void testUpgradeKeywordIncludeInAll() throws IOException {\n+ IndexService indexService = createIndex(\"test\");\n+ DocumentMapperParser parser = indexService.mapperService().documentMapperParser();\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"properties\").startObject(\"field\").field(\"type\", \"string\")\n+ .field(\"index\", \"not_analyzed\").field(\"include_in_all\", true).endObject().endObject()\n+ .endObject().endObject().string();\n+ DocumentMapper mapper = parser.parse(\"type\", 
new CompressedXContent(mapping));\n+ FieldMapper field = mapper.mappers().getMapper(\"field\");\n+ assertThat(field, instanceOf(KeywordFieldMapper.class));\n+ assertTrue(((KeywordFieldMapper) field).includeInAll());\n+ }\n+\n public void testUpgradeRandomMapping() throws IOException {\n final int iters = 20;\n for (int i = 0; i < iters; ++i) {", "filename": "core/src/test/java/org/elasticsearch/index/mapper/core/StringMappingUpgradeTests.java", "status": "modified" } ] }
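The record above whitelists `include_in_all` for the automatic `string` upgrade, so the reduced reproduction from the issue discussion now upgrades cleanly. A minimal sketch, reusing the index and field names from that reproduction:

```
PUT t
{
  "mappings": {
    "test": {
      "properties": {
        "foo": {
          "type": "string",
          "include_in_all": false
        }
      }
    }
  }
}

GET t/_mapping
```

The returned mapping is expected to report `"type": "text"` with the `include_in_all` choice preserved, and the `not_analyzed` variant upgrades to `keyword`, matching the assertions added in `StringMappingUpgradeTests`.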
{ "body": "<!--\nGitHub is reserved for bug reports and feature requests. The best place\nto ask a general question is at the Elastic Discourse forums at\nhttps://discuss.elastic.co. If you are in fact posting a bug report or\na feature request, please include one and only one of the below blocks\nin your new issue.\n-->\n\n<!--\nIf you are filing a bug report, please remove the below feature\nrequest block and provide responses for all of the below items.\n-->\n\n**Elasticsearch version**: 2.2.0\n\n**JVM version**: Oracle Corporation 1.8.0_45 (64-bit)\n\n**OS version**: Mac OS X 10.11.4 x86_64\n\n**Description of the problem including expected versus actual behavior**:\n\nI am running tests using your integration test framework (`ESIntegTestCase`), however, I am only using it to setup a cluster and index some documents. I have it configured so the http is enabled on a random port, and then I use the rest api for my tests. \n\nI am currently running a test that uses the inner hits functionality and I have found that one of my asserts on the index name of an inner hit fails. After inspecting the json response, I can see that the _index key on the inner hit is missing. Every time it fails it looks like the test framework has sets a test locale that result in non-english text in the logs. I don't know if that has anything to do with it or not, however, this is the only consistent thing I have noticed.\n\nThe inner hits are only against nested documents and the request that fails has multiple levels of nested objects. All levels of the nested response are missing the _index key. My search request is only using the top-level inner hits request structure since I need multiple levels of inner hits.\n\nI know there has been changes to inner hits lately so I am not sure this is even relevant in versions greater than 2.2. \n\n**Provide logs (if relevant)**: \n\nhttps://gist.github.com/mattweber/614e45ce0ab9a0487400fa69400a4131\n", "comments": [ { "body": "@martijnvg could you take a look at this please?\n\n@mattweber any chance you could add the test you're running?\n", "created_at": "2016-05-03T09:29:54Z" }, { "body": "@clintongormley It has some code that belongs to a client I am working with so I can't post it directly. I will work on writing an isolated test that essentially does the same thing though.\n", "created_at": "2016-05-03T13:09:28Z" }, { "body": "@clintongormley @martijnvg \n\nhttps://gist.github.com/mattweber/19c37d9b8a01fd305afa782bc38f9d2f\n\nHere is a quick and dirty test. Add this file as `core/src/test/java/org/elasticsearch/search/innerhits/InnerHitsIssueIT.java` to a checkout of the 2.2 branch. 
The test failure reproduces for me consistently with:\n\n`mvn clean verify -Pdev -Dskip.unit.tests -pl org.elasticsearch:elasticsearch -Dtests.seed=C293056A17C37DCE -Dtests.class=org.elasticsearch.search.innerhits.InnerHitsIssueIT -Dtests.method=\"testIssue18091\" -Des.logger.level=ERROR -Dtests.assertion.disabled=false -Dtests.security.manager=true -Dtests.heap.size=512m -Dtests.locale=sv_SE -Dtests.timezone=Portugal`\n", "created_at": "2016-05-03T15:54:23Z" }, { "body": "thanks @mattweber \n", "created_at": "2016-05-03T17:18:54Z" }, { "body": "Reproducible against `2.x` with following:\n\n`mvn verify -Pdev -Dskip.unit.tests -pl org.elasticsearch:elasticsearch -Dtests.seed=7AA0546C9A7151D7 -Dtests.class=org.elasticsearch.search.innerhits.InnerHitsIssueIT -Dtests.method=\"testIssue18091\" -Des.logger.level=ERROR -Dtests.assertion.disabled=false -Dtests.security.manager=true -Dtests.heap.size=512m -Dtests.locale=mk -Dtests.timezone=Etc/GMT+9`\n\nSame test \n", "created_at": "2016-05-04T00:44:17Z" }, { "body": "Also reproducible against current master (5.x) using the new multi-level inner hit functionality inside of the nested queries vs. a top-level request. \n\nHere is the test for master:\nhttps://gist.github.com/mattweber/578c77e1c5f148e13a3a9c238d169fea\n\nHere is my reproduce command:\n`gradle :core:integTest -Dtests.seed=98F2AB350B0F5BF2 -Dtests.class=org.elasticsearch.search.innerhits.InnerHitsIssueIT -Dtests.method=\"testIssue18091\" -Des.logger.level=WARN -Dtests.security.manager=true -Dtests.locale=zh-CN -Dtests.timezone=Etc/GMT-8`\n\nNote, I run this test in a loop until it fails. It usually takes less than 5 runs before I hit the failure.\n", "created_at": "2016-05-04T14:41:00Z" }, { "body": "@mattweber @clintongormley If I recall correctly during node to node serialization the `_index` key was not included for nested inner hits, since it isn't really needed, because the `_index` key is always the same as the parent search hit. The inconsistency here is that when search hits aren't serialized then the `_index` key is included. (which happens in test with a single node)\n\nI think the inconsistency should be fixed or the `_index` key should always be included in inner hits. \n", "created_at": "2016-05-17T10:05:39Z" }, { "body": "Thanks @martijnvg! Is index/type/id even needed here? It will always be the same as the parent document right? Plus we have the `_nested` object that gives us the offsets inside source nested array.\n", "created_at": "2016-06-17T13:51:18Z" }, { "body": "@mattweber I agree. index/type/id are not needed for nested inner hits.\n", "created_at": "2016-06-17T16:48:20Z" } ], "number": 18091, "title": "Inner Hits sometimes missing \"_index\" key" }
{ "body": "PR for #18091\n", "number": 18995, "review_comments": [], "title": "Don't include `_id`, `_type` and `_index` keys in search response for inner hits" }
{ "commits": [ { "message": "inner_hits: Don't include `_id`, `_type` and `_index` keys in search response for inner hits\n\nCloses #18091" } ], "files": [ { "diff": "@@ -439,13 +439,14 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n builder.field(\"_shard\", shard.shardId());\n builder.field(\"_node\", shard.nodeIdText());\n }\n- if (shard != null) {\n- builder.field(Fields._INDEX, shard.indexText());\n- }\n- builder.field(Fields._TYPE, type);\n- builder.field(Fields._ID, id);\n if (nestedIdentity != null) {\n nestedIdentity.toXContent(builder, params);\n+ } else {\n+ if (shard != null) {\n+ builder.field(Fields._INDEX, shard.indexText());\n+ }\n+ builder.field(Fields._TYPE, type);\n+ builder.field(Fields._ID, id);\n }\n if (version != -1) {\n builder.field(Fields._VERSION, version);", "filename": "core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java", "status": "modified" }, { "diff": "@@ -180,6 +180,9 @@ with inner hits defined inside the query dsl.\n * Source filtering for inner hits inside nested queries requires full field names instead of relative field names.\n This is now consistent for source filtering on other places in the search API.\n \n+* Nested inner hits will now no longer include `_index`, `_type` and `_id` keys. For nested inner hits these values\n+are always the same as the `_index`, `_type` and `_id` keys of the root search hit.\n+\n ==== Query Profiler\n \n In the response for profiling queries, the `query_type` has been renamed to `type` and `lucene` has been renamed to", "filename": "docs/reference/migration/migrate_5_0/search.asciidoc", "status": "modified" }, { "diff": "@@ -118,8 +118,6 @@ An example of a response snippet that could be generated from the above search r\n \"total\": ...,\n \"hits\": [\n {\n- \"_type\": \"question\",\n- \"_id\": \"1\",\n \"_nested\": {\n \"field\": \"comments\",\n \"offset\": 2", "filename": "docs/reference/search/request/inner-hits.asciidoc", "status": "modified" }, { "diff": "@@ -0,0 +1,84 @@\n+---\n+setup:\n+ - do:\n+ indices.create:\n+ index: test\n+ body:\n+ mappings:\n+ type_1: {\n+ properties: {\n+ nested_field : {\n+ type: nested\n+ }\n+ }\n+ }\n+ type_2: {}\n+ type_3: {\n+ _parent: {\n+ type: type_2\n+ }\n+ }\n+\n+---\n+\"Nested inner hits\":\n+ - do:\n+ index:\n+ index: test\n+ type: type_1\n+ id: 1\n+ body: {\n+ \"nested_field\" : [\n+ {\n+ \"foo\": \"bar\"\n+ }\n+ ]\n+ }\n+\n+ - do:\n+ indices.refresh: {}\n+\n+ - do:\n+ search:\n+ body: { \"query\" : { \"nested\" : { \"path\" : \"nested_field\", \"query\" : { \"match_all\" : {} }, \"inner_hits\" : {} } } }\n+ - match: { hits.total: 1 }\n+ - match: { hits.hits.0._index: \"test\" }\n+ - match: { hits.hits.0._type: \"type_1\" }\n+ - match: { hits.hits.0._id: \"1\" }\n+ - is_false: hits.hits.0.inner_hits.nested_field.hits.hits.0._index\n+ - is_false: hits.hits.0.inner_hits.nested_field.hits.hits.0._type\n+ - is_false: hits.hits.0.inner_hits.nested_field.hits.hits.0._id\n+ - match: { hits.hits.0.inner_hits.nested_field.hits.hits.0._nested.field: \"nested_field\" }\n+ - match: { hits.hits.0.inner_hits.nested_field.hits.hits.0._nested.offset: 0 }\n+ - is_false: hits.hits.0.inner_hits.nested_field.hits.hits.0._nested.child\n+\n+---\n+\"Parent/child inner hits\":\n+ - do:\n+ index:\n+ index: test\n+ type: type_2\n+ id: 1\n+ body: {\"foo\": \"bar\"}\n+\n+ - do:\n+ index:\n+ index: test\n+ type: type_3\n+ id: 1\n+ parent: 1\n+ body: {\"bar\": \"baz\"}\n+\n+ - do:\n+ indices.refresh: {}\n+\n+ - do:\n+ search:\n+ body: { 
\"query\" : { \"has_child\" : { \"type\" : \"type_3\", \"query\" : { \"match_all\" : {} }, \"inner_hits\" : {} } } }\n+ - match: { hits.total: 1 }\n+ - match: { hits.hits.0._index: \"test\" }\n+ - match: { hits.hits.0._type: \"type_2\" }\n+ - match: { hits.hits.0._id: \"1\" }\n+ - match: { hits.hits.0.inner_hits.type_3.hits.hits.0._index: \"test\" }\n+ - match: { hits.hits.0.inner_hits.type_3.hits.hits.0._type: \"type_3\" }\n+ - match: { hits.hits.0.inner_hits.type_3.hits.hits.0._id: \"1\" }\n+ - is_false: hits.hits.0.inner_hits.type_3.hits.hits.0._nested", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yaml", "status": "added" } ] }
{ "body": "**Elasticsearch version**: 2.3.2\n\n**JVM version**: 1.8.0_60\n\n**OS version**: osx\n\n**Description of the problem including expected versus actual behavior**:\n\n**Steps to reproduce**:\n1. Set `index.number_of_shards: 3` in `config/elasticsearch.yml`\n2. Create the scripts index with a single shard\n\n```\nDELETE /.scripts/\n\nPUT /.scripts/\n{\n \"settings\": {\n \"index\": {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n }\n }\n}\n```\n1. Call `GET /.scripts/` and see there are three shards for this index.\n\nThis does not happen with index templates it seems.\nAlso regular indexes are created correctly with one shard.\n", "comments": [ { "body": "> This does not happen with index templates it seems.\n\nUsing templates is the way to go IMO.\nIf I'm not mistaken, in 5.0, you can't set anymore index level settings in `elasticsearch.yml`.\n", "created_at": "2016-06-16T09:15:00Z" }, { "body": "The problem is here:\nhttps://github.com/elastic/elasticsearch/blob/b2c4c323e1014563dd02e9cc2f3a9175fd831e78/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java#L279\n\nWhen creating `.scripts` index, it always overrides the number of shards settings irregardless if it has been specified in the create index request.\n", "created_at": "2016-06-16T09:25:01Z" }, { "body": "Fixed via #18965\n", "created_at": "2016-06-21T06:51:03Z" } ], "number": 18912, "title": "Putting .scripts index takes `index.number_of_shards` into account" }
{ "body": "PR for #18912\n", "number": 18965, "review_comments": [ { "body": "s/tt/it/\n", "created_at": "2016-06-20T13:32:51Z" } ], "title": "Don't ignore custom sharding settings in create index request for `.scripts` index" }
{ "commits": [ { "message": "script: Don't ignore custom sharding settings in create index request for '.scripts' index\n\nCloses #18912" } ], "files": [ { "diff": "@@ -267,20 +267,24 @@ public ClusterState execute(ClusterState currentState) throws Exception {\n }\n // now, put the request settings, so they override templates\n indexSettingsBuilder.put(request.settings());\n+ int defaultNumberOfShards;\n+ int defaultNumberOfReplicas;\n if (request.index().equals(ScriptService.SCRIPT_INDEX)) {\n- indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, 1));\n+ defaultNumberOfShards = 1;\n+ defaultNumberOfReplicas = 0;\n } else {\n- if (indexSettingsBuilder.get(SETTING_NUMBER_OF_SHARDS) == null) {\n- indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, 5));\n- }\n+ defaultNumberOfShards = 5;\n+ defaultNumberOfReplicas = 1;\n }\n- if (request.index().equals(ScriptService.SCRIPT_INDEX)) {\n- indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 0));\n+ if (request.index().equals(ScriptService.SCRIPT_INDEX) && indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) {\n+ // For .script index it only make sense to set auto expand replicas if number of replicas hasn't been set:\n indexSettingsBuilder.put(SETTING_AUTO_EXPAND_REPLICAS, \"0-all\");\n- } else {\n- if (indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) {\n- indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 1));\n- }\n+ }\n+ if (indexSettingsBuilder.get(SETTING_NUMBER_OF_SHARDS) == null) {\n+ indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, defaultNumberOfShards));\n+ }\n+ if (indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) {\n+ indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, defaultNumberOfReplicas));\n }\n \n if (settings.get(SETTING_AUTO_EXPAND_REPLICAS) != null && indexSettingsBuilder.get(SETTING_AUTO_EXPAND_REPLICAS) == null) {", "filename": "core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java", "status": "modified" }, { "diff": "@@ -26,16 +26,22 @@\n import org.elasticsearch.action.indexedscripts.get.GetIndexedScriptResponse;\n import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptResponse;\n import org.elasticsearch.action.support.IndicesOptions;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n+import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.index.IndexNotFoundException;\n import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.script.ScriptService;\n import org.elasticsearch.script.groovy.GroovyPlugin;\n import org.elasticsearch.test.ESIntegTestCase;\n import org.junit.Test;\n \n+import java.io.IOException;\n import java.util.Collection;\n import java.util.Collections;\n \n+import static org.hamcrest.Matchers.equalTo;\n+import static org.hamcrest.Matchers.nullValue;\n+\n @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)\n public class ScriptIndexSettingsTests extends ESIntegTestCase {\n \n@@ -44,6 +50,19 @@ protected Collection<Class<? 
extends Plugin>> nodePlugins() {\n return pluginList(GroovyPlugin.class);\n }\n \n+ @Override\n+ public void randomIndexTemplate() throws IOException {\n+ // don't set random index template, because we are testing here what happens if no custom settings have been\n+ // specified\n+ }\n+\n+ @Override\n+ public Settings indexSettings() {\n+ // don't set random index settings, because we are testing here what happens if no custom settings have been\n+ // specified\n+ return Settings.EMPTY;\n+ }\n+\n @Test\n public void testScriptIndexSettings() {\n PutIndexedScriptResponse putIndexedScriptResponse =\n@@ -77,6 +96,27 @@ public void testScriptIndexSettings() {\n assertEquals(\"Auto expand replicas should be 0-all\", \"0-all\", numberOfReplicas);\n }\n \n+ @Test\n+ public void testScriptIndexDefaults() {\n+ createIndex(ScriptService.SCRIPT_INDEX);\n+ IndexMetaData indexMetaData = client().admin().cluster().prepareState().get()\n+ .getState().getMetaData().index(ScriptService.SCRIPT_INDEX);\n+ assertThat(indexMetaData.getNumberOfShards(), equalTo(1));\n+ assertThat(indexMetaData.getNumberOfReplicas(), equalTo(0));\n+ assertThat(indexMetaData.getSettings().get(\"index.auto_expand_replicas\"), equalTo(\"0-all\"));\n+\n+ client().admin().indices().prepareDelete(ScriptService.SCRIPT_INDEX).get();\n+ client().admin().indices().prepareCreate(ScriptService.SCRIPT_INDEX)\n+ .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3)\n+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2))\n+ .get();\n+ indexMetaData = client().admin().cluster().prepareState().get()\n+ .getState().getMetaData().index(ScriptService.SCRIPT_INDEX);\n+ assertThat(indexMetaData.getNumberOfShards(), equalTo(3));\n+ assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));\n+ assertThat(indexMetaData.getSettings().get(\"index.auto_expand_replicas\"), nullValue());\n+ }\n+\n @Test\n public void testDeleteScriptIndex() {\n PutIndexedScriptResponse putIndexedScriptResponse =", "filename": "modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ScriptIndexSettingsTests.java", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: master\n\n**JVM version**:\n\n```\njava version \"1.8.0_77\"\nJava(TM) SE Runtime Environment (build 1.8.0_77-b03)\nJava HotSpot(TM) 64-Bit Server VM (build 25.77-b03, mixed mode)\n```\n\n**OS version**: OS X 10.11.5\n\n**Description of the problem including expected versus actual behavior**:\nTrying to run `gradle build` but getting an error instead of build output, console output below. It looks like the `org.gradle.logging.progress` package was [added in 2.14](https://github.com/gradle/gradle/commit/a8be591089bbf9df86fcc58fc155b8e1329df524) and moved the `org.gradle.logging.ProgressLogger` class in the process.\n\n**Steps to reproduce**:\n1. Install gradle 2.14\n2. Checkout `master`\n3. Run `gradle build`\n\n**Provide logs (if relevant)**:\n\n``` sh\nelasticsearch [master] $ gradle build\n:buildSrc:clean\n:buildSrc:compileJava\n:buildSrc:compileGroovy\nstartup failed:\n/Users/spalger/dev/es/elasticsearch/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy: 28: unable to resolve class org.gradle.logging.ProgressLogger\n @ line 28, column 1.\n import org.gradle.logging.ProgressLogger\n ^\n\n/Users/spalger/dev/es/elasticsearch/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy: 25: unable to resolve class org.gradle.logging.ProgressLogger\n @ line 25, column 1.\n import org.gradle.logging.ProgressLogger\n ^\n\n/Users/spalger/dev/es/elasticsearch/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy: 23: unable to resolve class org.gradle.logging.ProgressLogger\n @ line 23, column 1.\n import org.gradle.logging.ProgressLogger\n ^\n\n3 errors\n\n:buildSrc:compileGroovy FAILED\n\nFAILURE: Build failed with an exception.\n\n* What went wrong:\nExecution failed for task ':compileGroovy'.\n> Compilation failed; see the compiler error output for details.\n\n* Try:\nRun with --stacktrace option to get the stack trace. Run with --info or --debug option to get more log output.\n\nBUILD FAILED\n\nTotal time: 5.056 secs\n```\n", "comments": [ { "body": "Just confirmed this is not broken in gradle 2.13\n", "created_at": "2016-06-17T03:39:31Z" }, { "body": "I have the save question as you when building with gradle\n", "created_at": "2016-06-17T06:01:21Z" }, { "body": "Related: https://discuss.gradle.org/t/gradle-2-14-breaks-plugins-using-consolerenderer/18045 .\n", "created_at": "2016-06-17T07:31:14Z" }, { "body": "This is actually worse than just the class being moved. Apparently they considered the package org.gradle.logging to be \"internal\", and in 2.14 internal classes are finally not available to plugins (and this class move makes it truly internal). 
So until they add back ProgressLogger as part of the plugin API, all our nice logging would disappear...\n\nI'm going to add a check for now in BuildPlugin init that the gradle version is equal exactly to 2.13...\n", "created_at": "2016-06-18T17:19:08Z" }, { "body": "Ah and of course this is hard to check for 2.13 because the failure happens inside buildSrc before we even get to check the gradle version...\n", "created_at": "2016-06-18T17:21:20Z" }, { "body": "I opened #18955 as a stopgap so at least the error message is clear when trying to use 2.14\n", "created_at": "2016-06-18T17:29:37Z" }, { "body": "Due to gradle core developer Adrian Kelly\n\nhttps://discuss.gradle.org/t/bug-in-gradle-2-14-rc1-no-service-of-type-styledtextoutputfactory/17638/3\n\nthere is no big chance that ProgressLogger will be available (again). So my suggestion is to adapt to Gradle 2.14 (including upcoming Gradle 3) as soon as possible by aligning the Elasticsearch build scripts/plugins to the reduced capabilities in https://docs.gradle.org/current/userguide/logging.html\n", "created_at": "2016-06-24T13:28:15Z" }, { "body": "Any chance to reconsider https://github.com/elastic/elasticsearch/pull/13744 due to this issue here?\nI'm not sure if keeping a 50kb blob out of the repo is worth forcing potential contributors to either downgrade system gradle or start keeping around a bunch of gradle versions that happen to work with ES.\n", "created_at": "2016-07-13T21:48:32Z" }, { "body": "@jprante \n\n> there is no big chance that ProgressLogger will be available (again)\n\nThat is simply not true. I spoke with developers at gradle during Gradle Summit and they understand that progress logger is important. I expect it to come back, in some form, in the future:\nhttps://discuss.gradle.org/t/can-gradle-team-add-progresslogger-and-progressloggerfactory-to-public-api/18176/6\n\n@mfussenegger \n\n> Any chance to reconsider #13744 due to this issue here?\n\nThe size is not the issue there. It is that we do not want _binary_ blobs in our repo. I would be ok with a custom equivalent of the gradle wrapper that depended on java 8 and jjs to download the gradle binary, but I have not investigated the real feasibility of such a solution. In the meantime, you don't need to manage \"a bunch\" of versions, just two, 2.13 and whatever other version you are on. You can add your own gradle wrapper file then that just runs gradle 2.13 wherever it is on your system. I would even be ok with adding this to the gitignore so that you can update the repo without it looking like some outlier file.\n", "created_at": "2016-07-13T22:19:54Z" }, { "body": "> It is that we do not want binary blobs in our repo.\n\nAren't the zip files for bwc testing also binary files?\n\n> In the meantime, you don't need to manage \"a bunch\" of versions, just two, 2.13 \n\nI'm probably being a bit too pessimistic here and exaggerating.\nAnyway, it's not much of a problem for me personally. Just wanted to bring it up because it definetly _is_ a stepping stone.\n", "created_at": "2016-07-13T22:39:38Z" }, { "body": "I think it would be helpful to add the requirement for Gradle 2.13 to the docs for contribution and to make it more explicit that it is required in the main readme. Currently the readme says: \n\n> You’ll need to have a modern version of Gradle installed – 2.13 should do.\n\nWhich makes it sound like 2.13 or upwards is fine. 
\n\nThere's no mention of version on the contribution doc.\n\nIt's only a small issue and the error message makes it very clear what has gone wrong, but it could save the time of people like me, as I just downloaded the latest version of Gradle purely for the sake of contributing to the project.\n\nI'd be happy to make the change myself since I was after something simple first anyway. Is it precisely version 2.13 that works, or can slightly older Gradle versions work too? \n", "created_at": "2016-10-03T17:40:22Z" }, { "body": "@manterfield Please do make a PR! I agree we should update the readme/contrib doc wording given our current limitation.\n", "created_at": "2016-10-03T17:41:57Z" }, { "body": "And it must be 2.13 at this time.\n", "created_at": "2016-10-03T17:42:29Z" }, { "body": "Thanks @rjernst, made a PR (#20776) with doc updates in. \n", "created_at": "2016-10-06T10:20:58Z" }, { "body": "Closed by #22669. The docs will be updated once we have moved our builds to use Gradle 3.x and feel comfortable removing support for 2.13.", "created_at": "2017-01-19T11:20:27Z" }, { "body": "Sorry, this has to be reopened, IntelliJ is unhappy with the change.", "created_at": "2017-01-20T23:18:36Z" }, { "body": "Pushed a fix for IntelliJ.", "created_at": "2017-01-24T13:04:17Z" } ], "number": 18935, "title": "Gradle 2.14 compatibility?" }
{ "body": "see #18935\n", "number": 18955, "review_comments": [], "title": "Build: Require exactly gradle 2.13" }
{ "commits": [ { "message": "Build: Require exactly gradle 2.13\n\nsee #18935" } ], "files": [ { "diff": "@@ -23,6 +23,16 @@ apply plugin: 'groovy'\n \n group = 'org.elasticsearch.gradle'\n \n+// TODO: remove this when upgrading to a version that supports ProgressLogger\n+// gradle 2.14 made internal apis unavailable to plugins, and gradle considered\n+// ProgressLogger to be an internal api. Until this is made available again,\n+// we can't upgrade without losing our nice progress logging\n+// NOTE that this check duplicates that in BuildPlugin, but we need to check\n+// early here before trying to compile the broken classes in buildSrc\n+if (GradleVersion.current() != GradleVersion.version('2.13')) {\n+ throw new GradleException('Gradle 2.13 is required to build elasticsearch')\n+}\n+\n if (project == rootProject) {\n // change the build dir used during build init, so that doing a clean\n // won't wipe out the buildscript jar", "filename": "buildSrc/build.gradle", "status": "modified" } ] }
{ "body": "In 5.0 we use BM25, which means that query coordination should always be disabled. This works correctly with the `bool` query but the `multi_match` query enables coordination incorrectly:\n\n```\nPUT t/t/1\n{\n \"foo\": \"one\",\n \"bar\": \"two\"\n}\n\nGET t/_search\n{\n \"query\": {\n \"multi_match\": {\n \"query\": \"one two\",\n \"fields\": [\"foo\", \"bar\"]\n }\n },\n \"explain\": true\n}\n```\n\nReturns:\n\n```\n {\n \"value\": 0.5,\n \"description\": \"coord(1/2)\",\n \"details\": []\n }\n```\n", "comments": [ { "body": "> In 5.0 we use BM25, which means that query coordination should always be disabled.\n\nThe default similarity is still TFIDF which is referred as `classic`. I opened https://github.com/elastic/elasticsearch/pull/18948 to change the default similarity to BM25. \n\n> This works correctly with the bool query but the multi_match query enables coordination incorrectly\n\nThis is how the `match_query` works. It's the same on 2.x, I didn't test 1.7 but it should do the same. \nCoords are disabled only when multiple terms are at the same position in the query otherwise the coords are always enabled and we rely on this functionality for the relevancy (documents matching a lot of terms are scored first). Regarding BM25, things will change since the coords are not taken into account in this similarity but this should not be considered as a bug ? To be honest I don't know what's the impact on the relevancy for queries produced by a `match_query` or a `multi_match_query`. @jpountz @rmuir WDYT ? \n", "created_at": "2016-06-17T17:32:36Z" }, { "body": "> This is how the match_query works.\n\nThis is why a SynonymQuery was added when defaulting to BM25 that handles this case in a more generic way for any scoring system (including classic TF/IDF): \n\n> One issue was the generation of synonym queries (posinc=0) by QueryBuilder (used by parsers). This is kind of a corner case (query-time synonyms), but we should make it nicer. The current code in trunk disables coord, which makes no sense for anything but the vector space impl. Instead, this patch adds a SynonymQuery which treats occurrences of any term as a single pseudoterm. With english wordnet as a query-time synonym dict, this query gives 12% improvement in MAP for title queries on BM25, and 2% with Classic (not significant). So its a better generic approach for synonyms that works with all scoring models.\n> \n> I wanted to use BlendedTermQuery, but it seems to have problems at a glance, it tries to \"take on the world\", it has problems like not working with distributed scoring (doesn't consult indexsearcher for stats). Anyway this one is a different, simpler approach, which only works for a single field, and which calls tf(sum) a single time. \n\nhttps://issues.apache.org/jira/browse/LUCENE-6789\n\nPlease use it :)\n", "created_at": "2016-06-17T19:37:53Z" }, { "body": "I am currently using 2.3.3 and planning to start experimenting with BM25 since Lucene 6.0 makes that the default. I assumed it was ready to be used in ES as well. Is that not the case? Should I wait until 5.0 especially since I use `multi_match` heavily?\n", "created_at": "2016-06-20T03:05:43Z" }, { "body": "Thanks for the clarification @rmuir. \n@rpedela please use it as well ;) Concerning the `multi_match` you may want to experiment different boosts as the scoring for BM25 is different and the range of possible values differ. 
\n", "created_at": "2016-06-20T07:01:25Z" }, { "body": "@clintongormley I think we can close this issue (please reopen if you disagree). The coords are a TF/IDF thing that was added as a countermeasure for terms with very high term frequency where the score constantly increases and never reaches a saturation point like in `BM25`. \n", "created_at": "2016-06-21T11:38:56Z" } ], "number": 18944, "title": "Multi_match should not enable coordination in bool query with BM25" }
{ "body": "The default similarity was set to `classic` which refers to TFIDF and has not been moved after the upgrade to Lucene 6.\n\nThough moving to BM25 could have some downside for queries that relies on coordination factor (match_query, multi_match_query) ?\nrelates #18944\n", "number": 18948, "review_comments": [ { "body": "Even though we currently allow to redefine the `BM25` similarity, this does not sound like a good idea to me so maybe we should not document it?\n", "created_at": "2016-06-20T10:26:56Z" }, { "body": "> Even though we currently allow to redefine the BM25 similarity,\n\nNot anymore: https://github.com/elastic/elasticsearch/pull/16682\n\nI am trying to say that the default is `BM25` but I'll do differently. The `bm25`explanation should be first and declared as the default for ES.\n", "created_at": "2016-06-20T16:24:25Z" }, { "body": "great :)\n", "created_at": "2016-06-21T07:29:44Z" } ], "title": "Change default similarity to BM25" }
{ "commits": [ { "message": "Change default similarity to BM25\n\nThe default similarity was set to `classic` which refers to TFIDF and has not been moved after the upgrade to Lucene 6.\n\nThough moving to BM25 could have some downside for queries that relies on coordination factor (match_query, multi_match_query) ?\n\nrelates #18944" } ], "files": [ { "diff": "@@ -36,7 +36,7 @@\n \n public final class SimilarityService extends AbstractIndexComponent {\n \n- public final static String DEFAULT_SIMILARITY = \"classic\";\n+ public final static String DEFAULT_SIMILARITY = \"BM25\";\n private final Similarity defaultSimilarity;\n private final Similarity baseSimilarity;\n private final Map<String, SimilarityProvider> similarities;\n@@ -121,8 +121,8 @@ public SimilarityProvider getSimilarity(String name) {\n return similarities.get(name);\n }\n \n- public SimilarityProvider getDefaultSimilarity() {\n- return similarities.get(\"default\");\n+ Similarity getDefaultSimilarity() {\n+ return defaultSimilarity;\n }\n \n static class PerFieldSimilarity extends PerFieldSimilarityWrapper {", "filename": "core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java", "status": "modified" }, { "diff": "@@ -18,6 +18,8 @@\n */\n package org.elasticsearch.index.similarity;\n \n+import org.apache.lucene.search.similarities.BM25Similarity;\n+import org.apache.lucene.search.similarities.ClassicSimilarity;\n import org.elasticsearch.Version;\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.settings.Settings;\n@@ -27,7 +29,15 @@\n \n import java.util.Collections;\n \n+import static org.hamcrest.Matchers.instanceOf;\n+\n public class SimilarityServiceTests extends ESTestCase {\n+ public void testDefaultSimilarity() {\n+ Settings settings = Settings.builder().build();\n+ IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(\"test\", settings);\n+ SimilarityService service = new SimilarityService(indexSettings, Collections.emptyMap());\n+ assertThat(service.getDefaultSimilarity(), instanceOf(BM25Similarity.class));\n+ }\n \n // Tests #16594\n public void testOverrideBuiltInSimilarity() {\n@@ -53,10 +63,10 @@ public void testOverrideBuiltInSimilarityPreV3() {\n }\n \n // Tests #16594\n- public void testDefaultSimilarity() {\n- Settings settings = Settings.builder().put(\"index.similarity.default.type\", \"BM25\").build();\n+ public void testOverrideDefaultSimilarity() {\n+ Settings settings = Settings.builder().put(\"index.similarity.default.type\", \"classic\").build();\n IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(\"test\", settings);\n SimilarityService service = new SimilarityService(indexSettings, Collections.emptyMap());\n- assertTrue(service.getDefaultSimilarity() instanceof BM25SimilarityProvider);\n+ assertTrue(service.getDefaultSimilarity() instanceof ClassicSimilarity);\n }\n }", "filename": "core/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java", "status": "modified" }, { "diff": "@@ -47,25 +47,11 @@ Here we configure the DFRSimilarity so it can be referenced as\n [float]\n === Available similarities\n \n-[float]\n-[[classic-similarity]]\n-==== Classic similarity\n-\n-The classic similarity that is based on the TF/IDF model. This\n-similarity has the following option:\n-\n-`discount_overlaps`::\n- Determines whether overlap tokens (Tokens with\n- 0 position increment) are ignored when computing norm. 
By default this\n- is true, meaning overlap tokens do not count when computing norms.\n-\n-Type name: `classic`\n-\n [float]\n [[bm25]]\n-==== BM25 similarity\n+==== BM25 similarity (*default*)\n \n-Another TF/IDF based similarity that has built-in tf normalization and\n+TF/IDF based similarity that has built-in tf normalization and\n is supposed to work better for short fields (like names). See\n http://en.wikipedia.org/wiki/Okapi_BM25[Okapi_BM25] for more details.\n This similarity has the following options:\n@@ -86,6 +72,20 @@ This similarity has the following options:\n \n Type name: `BM25`\n \n+[float]\n+[[classic-similarity]]\n+==== Classic similarity\n+\n+The classic similarity that is based on the TF/IDF model. This\n+similarity has the following option:\n+\n+`discount_overlaps`::\n+ Determines whether overlap tokens (Tokens with\n+ 0 position increment) are ignored when computing norm. By default this\n+ is true, meaning overlap tokens do not count when computing norms.\n+\n+Type name: `classic`\n+\n [float]\n [[drf]]\n ==== DFR similarity\n@@ -178,5 +178,5 @@ You can change the default similarity for all fields by putting the following se\n \n [source,js]\n --------------------------------------------------\n-index.similarity.default.type: BM25\n+index.similarity.default.type: classic\n --------------------------------------------------", "filename": "docs/reference/index-modules/similarity.asciidoc", "status": "modified" }, { "diff": "@@ -196,3 +196,7 @@ The <<search-request-preference,search preference>> `_prefer_node` has\n been superseded by `_prefer_nodes`. By specifying a single node,\n `_prefer_nodes` provides the same functionality as `_prefer_node` but\n also supports specifying multiple nodes.\n+\n+==== Default similarity\n+\n+The default similarity has been changed to `BM25`.", "filename": "docs/reference/migration/migrate_5_0/search.asciidoc", "status": "modified" } ] }
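For reference, the `k1`, `b` and `discount_overlaps` options described in the docs diff above map onto Lucene's BM25Similarity. The sketch below is a hypothetical standalone example (Lucene 6.x API, default values assumed from the documentation), not Elasticsearch's SimilarityProvider code.

```java
import org.apache.lucene.search.similarities.BM25Similarity;
import org.apache.lucene.search.similarities.Similarity;

public class Bm25Defaults {
    public static void main(String[] args) {
        // Lucene's defaults, which the `k1`, `b` and `discount_overlaps`
        // options in the similarity docs correspond to.
        BM25Similarity bm25 = new BM25Similarity(1.2f, 0.75f);
        bm25.setDiscountOverlaps(true);
        Similarity similarity = bm25;
        System.out.println(similarity); // prints something like BM25(k1=1.2,b=0.75)
    }
}
```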
{ "body": "**Elasticsearch version**:\n2.3.2\n\n**JVM version**:\n1.8.0_60 OpenJDK 64-Bit Server VM 25.60-b23 Oracle Corporation\n\n**OS version**:\nOpenSUSE Leap 42.1\n\n**Description of the problem including expected versus actual behavior**:\nInvalid JSON returned in Elasticsearch results. Expect results from Elasticsearch to be valid JSON.\n\n**Steps to reproduce**:\nWe have an aggregation that includes longitude, latitude and location name separated by a pipe. ES isn't escaping the quotes within one of them. ES returns this as results:\n\n{\"took\":59,\"aggregations\":{\"time_group\":{\"buckets\":[{\"key\":78469,\"d\":{\"buckets\":[{\"d\":{\"buckets\":[{\"key\":\"34.774663|-112.467699|Chino Valley Fire Department\"}]}}]},\"w\":{\"value\":1.0}},{\"key\":78388,\"d\":{\"buckets\":[{\"d\":{\"buckets\":[{\"key\":\"34.775424|-112.452459|Windmill 7\"}]}}]},\"w\":{\"value\":1.0}},{\"key\":78333,\"d\":{\"buckets\":[{\"d\":{\"buckets\":[{\"key\":\"34.773574|-112.465953|\"19 Remembered\" Hotshot Honors\"}]}}]},\"w\":{\"value\":1.0}},{\"key\":78309,\"d\":{\"buckets\":[{\"d\":{\"buckets\":[{\"key\":\"34.773311|-112.46529|Chino Valley Saluted America's Heros\"}]}}]},\"w\":{\"value\":1.0}},{\"key\":78305,\"d\":{\"buckets\":[{\"d\":{\"buckets\":[{\"key\":\"34.77383|-112.465432|Chino Valley Public Library\"}]}}]},\"w\":{\"value\":1.0}},{\"key\":78112,\"d\":{\"buckets\":[{\"d\":{\"buckets\":[{\"key\":\"34.769398|-112.447186|Chino Valley Community Center Park\"}]}}]},\"w\":{\"value\":1.0}},{\"key\":77976,\"d\":{\"buckets\":[{\"d\":{\"buckets\":[{\"key\":\"34.777816|-112.447498|Stoned Security Donkey\"}]}}]},\"w\":{\"value\":1.0}},{\"key\":77830,\"d\":{\"buckets\":[{\"d\":{\"buckets\":[{\"key\":\"34.772045|-112.427787|Peavine Trails\"}]}}]},\"w\":{\"value\":1.0}},{\"key\":77585,\"d\":{\"buckets\":[{\"d\":{\"buckets\":[{\"key\":\"34.644372|-112.432358|Prescott Municipal Airport\"}]}}]},\"w\":{\"value\":1.0}},{\"key\":77371,\"d\":{\"buckets\":[{\"d\":{\"buckets\":[{\"key\":\"34.760367|-112.447588|Hope Lutheran Church\"}]}}]},\"w\":{\"value\":1.0}},{\"key\":56892,\"d\":{\"buckets\":[{\"d\":{\"buckets\":[{\"key\":\"35.483665|-111.556402|Coconino National Forest\"}]}}]},\"w\":{\"value\":0.89}},{\"key\":51987,\"d\":{\"buckets\":[{\"d\":{\"buckets\":[{\"key\":\"35.313604|-112.852838|Seligman, Arizona\"}]}}]},\"w\":{\"value\":0.84}}]}}}\n\nSpecifically this bucket key isn't escaped:\n\n{\"key\":\"34.773574|-112.465953|\"19 Remembered\" Hotshot Honors\"}\n", "comments": [ { "body": "Found a workaround... if you don't use the filter_path, the results are properly escaped.\n", "created_at": "2016-04-30T23:49:36Z" }, { "body": "@shawn-digitalpoint then this looks like a bug in how `filter_path` works. 
Can you share your request (including the `filter_path`)?\n", "created_at": "2016-05-01T19:52:11Z" }, { "body": "Using PHP to build the JSON, but this is the PHP array (not sure why github's code block isn't working, sorry...):\n\n```\n$params = array(\n 'size' => 0,\n 'sort' => array(\n 'date' => array(\n 'order' => 'desc'\n )\n ),\n 'query' => array(\n 'bool' => array(\n 'must' => array(\n array(\n 'match' => array(\n 'attacking_agent_id' => 100,\n ),\n ),\n array(\n 'range' => array(\n 'date' => array(\n 'gte' => 1430606758\n )\n ),\n ),\n )\n )\n ),\n 'aggs' => array(\n 'time_group' => array(\n 'terms' => array(\n 'field' => 'location_id',\n 'size' => 100000,\n 'order'=> array(\n '_term' => 'desc'\n ),\n ),\n\n 'aggs' => array(\n 'd' => array(\n 'terms' => array(\n 'field' => 'timegroup_agent',\n 'size' => 1\n ),\n 'aggs' => array(\n 'd' => array(\n 'terms' => array(\n 'field' => 'raw_geo_name',\n 'size' => 1\n )\n ),\n )\n ),\n 'total_date' => array(\n 'sum' => array(\n 'field' => 'date'\n )\n ),\n 'unique_attacks' => array(\n 'cardinality' => array(\n \"field\" => \"timegroup_agent\"\n )\n ),\n\n \"w\" => array (\n 'bucket_script' => array(\n 'buckets_path' => array(\n 'totalDate' => 'total_date',\n 'unique' => 'unique_attacks',\n 'docCount' => '_count',\n ),\n \"script\" => 'min(' . $maxWeight . ', max(' . $minWeight . ', round(((' . ($daysBack * 86400) . ' * docCount) - ((' . (XenForo_Application::$time) . ' * docCount) - totalDate)) / docCount / ' . ($daysBack * 86400) . ' * unique * 100) / 100))' // round(((' . ($daysBack * 86400) . ' * docCount) - ((' . XenForo_Application::$time . ' * docCount) - totalDate)) / ' . ($daysBack * 86400) . ' * 100) / 100\n )\n ),\n )\n ),\n )\n);\n```\n", "created_at": "2016-05-02T22:47:56Z" }, { "body": "Oh, and the filter_path used when getting invalid JSON is:\n\ntook,aggregations.time_group.buckets.key,aggregations.time_group.buckets.d.buckets.d.buckets.key,aggregations.time_group.buckets.w\n", "created_at": "2016-05-02T22:52:08Z" }, { "body": "Thanks for reporting! It's probably a Jackson bug so I created FasterXML/jackson-core/pull/280 to see and discuss this issue with the Jackson team. I'll update this issue once I have some feedback.\n", "created_at": "2016-05-06T11:56:06Z" }, { "body": "The Jackson issue has been confirmed and merged. This issue will be resolved once we move on a new release of jackson-core that integrates the fix.\n", "created_at": "2016-05-09T08:42:52Z" }, { "body": "awesome @tlrx great to fix this kind of stuff upstream directly!\n", "created_at": "2016-05-09T10:19:10Z" } ], "number": 18076, "title": "Invalid JSON Being Returned" }
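A minimal sketch of how the symptom above can be detected, assuming a local cluster at localhost:9200, the index `t` from the report, one of the reported aggregation paths, and jackson-databind on the classpath; the class name is hypothetical and this is unrelated to the actual fix, which landed upstream in jackson-core.

```java
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class FilterPathJsonCheck {
    public static void main(String[] args) throws Exception {
        // Hypothetical local cluster; filter_path taken from the report above.
        String url = "http://localhost:9200/t/_search"
            + "?filter_path=took,aggregations.time_group.buckets.key";
        HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
        conn.setRequestMethod("GET");
        try (InputStream body = conn.getInputStream()) {
            // readTree throws an exception if the response is not well-formed
            // JSON, which is how the unescaped-quote bug shows up.
            new ObjectMapper().readTree(body);
            System.out.println("filter_path response parsed as valid JSON");
        } finally {
            conn.disconnect();
        }
    }
}
```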
{ "body": "Jackson 2.8.1 has been released. This PR updates the version to the latest version of Jackson which fix #18076 and is also more strict when it build objects.\n", "number": 18939, "review_comments": [ { "body": "should we put an assertions that it actually is a JsonWriteContext?\n", "created_at": "2016-06-17T08:38:59Z" }, { "body": "hmm should we wait until it's GA?\n", "created_at": "2016-06-17T08:39:14Z" }, { "body": "Yes, I commented the pull request with \"This is work in progress\"\n\nI'm just tracking all necessary changes here so that I have some code to share with Jackson's team.\n\nEdit: ok, label \"WIP\" was missing, sorry.\n", "created_at": "2016-06-17T08:57:25Z" }, { "body": "Sure\n", "created_at": "2016-08-02T09:48:35Z" }, { "body": "Version updated to 2.8.1\n", "created_at": "2016-08-02T09:48:47Z" }, { "body": "pretty printing is difficult, not sure if it works on windows with trailing whiltespaces / new lines etc?\n", "created_at": "2016-08-03T09:03:18Z" }, { "body": "don't prettyprint please we don't test this here\n", "created_at": "2016-08-03T09:03:42Z" }, { "body": "same here don't do the pretty printing please \n", "created_at": "2016-08-03T09:26:52Z" }, { "body": "Of course... thanks for pointing this\n", "created_at": "2016-08-03T09:44:14Z" } ], "title": "Update to jackson 2.8.1" }
{ "commits": [ { "message": "Update to Jackson 2.8.1\n\nThis commit updates Jackson to the 2.8.1 version, which is more strict when it comes to build objects. It also adds the snakeyaml dependency that was previously shaded in jackson libs.\n\nIt also closes #18076" } ], "files": [ { "diff": "@@ -676,7 +676,6 @@\n <suppress files=\"core[/\\\\]src[/\\\\]test[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]action[/\\\\]ingest[/\\\\]SimulatePipelineRequestParsingTests.java\" checks=\"LineLength\" />\n <suppress files=\"core[/\\\\]src[/\\\\]test[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]action[/\\\\]ingest[/\\\\]SimulatePipelineResponseTests.java\" checks=\"LineLength\" />\n <suppress files=\"core[/\\\\]src[/\\\\]test[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]action[/\\\\]ingest[/\\\\]WriteableIngestDocumentTests.java\" checks=\"LineLength\" />\n- <suppress files=\"core[/\\\\]src[/\\\\]test[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]action[/\\\\]search[/\\\\]MultiSearchRequestTests.java\" checks=\"LineLength\" />\n <suppress files=\"core[/\\\\]src[/\\\\]test[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]action[/\\\\]search[/\\\\]SearchRequestBuilderTests.java\" checks=\"LineLength\" />\n <suppress files=\"core[/\\\\]src[/\\\\]test[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]action[/\\\\]support[/\\\\]AutoCreateIndexTests.java\" checks=\"LineLength\" />\n <suppress files=\"core[/\\\\]src[/\\\\]test[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]action[/\\\\]support[/\\\\]IndicesOptionsTests.java\" checks=\"LineLength\" />\n@@ -1009,7 +1008,6 @@\n <suppress files=\"core[/\\\\]src[/\\\\]test[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]search[/\\\\]aggregations[/\\\\]bucket[/\\\\]ShardReduceIT.java\" checks=\"LineLength\" />\n <suppress files=\"core[/\\\\]src[/\\\\]test[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]search[/\\\\]aggregations[/\\\\]bucket[/\\\\]ShardSizeTestCase.java\" checks=\"LineLength\" />\n <suppress files=\"core[/\\\\]src[/\\\\]test[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]search[/\\\\]aggregations[/\\\\]bucket[/\\\\]SignificantTermsIT.java\" checks=\"LineLength\" />\n- <suppress files=\"core[/\\\\]src[/\\\\]test[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]search[/\\\\]aggregations[/\\\\]bucket[/\\\\]SignificantTermsSignificanceScoreIT.java\" checks=\"LineLength\" />\n <suppress files=\"core[/\\\\]src[/\\\\]test[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]search[/\\\\]aggregations[/\\\\]bucket[/\\\\]TermsDocCountErrorIT.java\" checks=\"LineLength\" />\n <suppress files=\"core[/\\\\]src[/\\\\]test[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]search[/\\\\]aggregations[/\\\\]bucket[/\\\\]TermsShardMinDocCountIT.java\" checks=\"LineLength\" />\n <suppress files=\"core[/\\\\]src[/\\\\]test[/\\\\]java[/\\\\]org[/\\\\]elasticsearch[/\\\\]search[/\\\\]aggregations[/\\\\]bucket[/\\\\]nested[/\\\\]NestedAggregatorTests.java\" checks=\"LineLength\" />", "filename": "buildSrc/src/main/resources/checkstyle_suppressions.xml", "status": "modified" }, { "diff": "@@ -4,7 +4,8 @@ lucene = 6.1.0\n # optional dependencies\n spatial4j = 0.6\n jts = 1.13\n-jackson = 2.7.1\n+jackson = 2.8.1\n+snakeyaml = 1.15\n log4j = 1.2.17\n slf4j = 1.6.2\n jna = 4.2.2", "filename": "buildSrc/version.properties", "status": "modified" }, { "diff": "@@ -0,0 +1 @@\n+fd13b1c033741d48291315c6370f7d475a42dccf\n\\ No newline at end of file", "filename": "client/sniffer/licenses/jackson-core-2.8.1.jar.sha1", "status": "added" }, { "diff": "@@ -69,6 +69,7 @@ dependencies {\n compile 
'org.joda:joda-convert:1.2'\n \n // json and yaml\n+ compile \"org.yaml:snakeyaml:${versions.snakeyaml}\"\n compile \"com.fasterxml.jackson.core:jackson-core:${versions.jackson}\"\n compile \"com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${versions.jackson}\"\n compile \"com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}\"", "filename": "core/build.gradle", "status": "modified" }, { "diff": "@@ -24,6 +24,7 @@\n import com.fasterxml.jackson.core.base.GeneratorBase;\n import com.fasterxml.jackson.core.filter.FilteringGeneratorDelegate;\n import com.fasterxml.jackson.core.io.SerializedString;\n+import com.fasterxml.jackson.core.json.JsonWriteContext;\n import com.fasterxml.jackson.core.util.DefaultIndenter;\n import com.fasterxml.jackson.core.util.DefaultPrettyPrinter;\n import org.elasticsearch.common.bytes.BytesReference;\n@@ -271,7 +272,9 @@ private void writeStartRaw(String fieldName) throws IOException {\n public void writeEndRaw() {\n assert base != null : \"JsonGenerator should be of instance GeneratorBase but was: \" + generator.getClass();\n if (base != null) {\n- base.getOutputContext().writeValue();\n+ JsonStreamContext context = base.getOutputContext();\n+ assert (context instanceof JsonWriteContext) : \"Expected an instance of JsonWriteContext but was: \" + context.getClass();\n+ ((JsonWriteContext) context).writeValue();\n }\n }\n ", "filename": "core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java", "status": "modified" }, { "diff": "@@ -87,11 +87,6 @@ public String text() throws IOException {\n \n @Override\n public BytesRef utf8Bytes() throws IOException {\n- // Tentative workaround for https://github.com/elastic/elasticsearch/issues/8629\n- // TODO: Remove this when we upgrade jackson to 2.6.x.\n- if (parser.getTextLength() == 0) {\n- return new BytesRef();\n- }\n return new BytesRef(CharBuffer.wrap(parser.getTextCharacters(), parser.getTextOffset(), parser.getTextLength()));\n }\n ", "filename": "core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java", "status": "modified" }, { "diff": "@@ -44,19 +44,33 @@\n public class MultiSearchRequestTests extends ESTestCase {\n public void testSimpleAdd() throws Exception {\n MultiSearchRequest request = parseMultiSearchRequest(\"/org/elasticsearch/action/search/simple-msearch1.json\");\n- assertThat(request.requests().size(), equalTo(8));\n- assertThat(request.requests().get(0).indices()[0], equalTo(\"test\"));\n- assertThat(request.requests().get(0).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed())));\n- assertThat(request.requests().get(0).types().length, equalTo(0));\n- assertThat(request.requests().get(1).indices()[0], equalTo(\"test\"));\n- assertThat(request.requests().get(1).indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed())));\n- assertThat(request.requests().get(1).types()[0], equalTo(\"type1\"));\n- assertThat(request.requests().get(2).indices()[0], equalTo(\"test\"));\n- assertThat(request.requests().get(2).indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, false, IndicesOptions.strictExpandOpenAndForbidClosed())));\n- assertThat(request.requests().get(3).indices()[0], equalTo(\"test\"));\n- assertThat(request.requests().get(3).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true, 
IndicesOptions.strictExpandOpenAndForbidClosed())));\n- assertThat(request.requests().get(4).indices()[0], equalTo(\"test\"));\n- assertThat(request.requests().get(4).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, false, false, true, IndicesOptions.strictExpandOpenAndForbidClosed())));\n+ assertThat(request.requests().size(),\n+ equalTo(8));\n+ assertThat(request.requests().get(0).indices()[0],\n+ equalTo(\"test\"));\n+ assertThat(request.requests().get(0).indicesOptions(),\n+ equalTo(IndicesOptions.fromOptions(true, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed())));\n+ assertThat(request.requests().get(0).types().length,\n+ equalTo(0));\n+ assertThat(request.requests().get(1).indices()[0],\n+ equalTo(\"test\"));\n+ assertThat(request.requests().get(1).indicesOptions(),\n+ equalTo(IndicesOptions.fromOptions(false, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed())));\n+ assertThat(request.requests().get(1).types()[0],\n+ equalTo(\"type1\"));\n+ assertThat(request.requests().get(2).indices()[0],\n+ equalTo(\"test\"));\n+ assertThat(request.requests().get(2).indicesOptions(),\n+ equalTo(IndicesOptions.fromOptions(false, true, true, false, IndicesOptions.strictExpandOpenAndForbidClosed())));\n+ assertThat(request.requests().get(3).indices()[0],\n+ equalTo(\"test\"));\n+ assertThat(request.requests().get(3).indicesOptions(),\n+ equalTo(IndicesOptions.fromOptions(true, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed())));\n+ assertThat(request.requests().get(4).indices()[0],\n+ equalTo(\"test\"));\n+ assertThat(request.requests().get(4).indicesOptions(),\n+ equalTo(IndicesOptions.fromOptions(true, false, false, true, IndicesOptions.strictExpandOpenAndForbidClosed())));\n+\n assertThat(request.requests().get(5).indices(), is(Strings.EMPTY_ARRAY));\n assertThat(request.requests().get(5).types().length, equalTo(0));\n assertThat(request.requests().get(6).indices(), is(Strings.EMPTY_ARRAY));\n@@ -119,10 +133,27 @@ public void testSimpleAdd4() throws Exception {\n }\n \n public void testResponseErrorToXContent() throws IOException {\n- MultiSearchResponse response = new MultiSearchResponse(new MultiSearchResponse.Item[]{new MultiSearchResponse.Item(null, new IllegalStateException(\"foobar\")), new MultiSearchResponse.Item(null, new IllegalStateException(\"baaaaaazzzz\"))});\n+ MultiSearchResponse response = new MultiSearchResponse(\n+ new MultiSearchResponse.Item[]{\n+ new MultiSearchResponse.Item(null, new IllegalStateException(\"foobar\")),\n+ new MultiSearchResponse.Item(null, new IllegalStateException(\"baaaaaazzzz\"))\n+ });\n+\n XContentBuilder builder = XContentFactory.jsonBuilder();\n+ builder.startObject();\n response.toXContent(builder, ToXContent.EMPTY_PARAMS);\n- assertEquals(\"\\\"responses\\\"[{\\\"error\\\":{\\\"root_cause\\\":[{\\\"type\\\":\\\"illegal_state_exception\\\",\\\"reason\\\":\\\"foobar\\\"}],\\\"type\\\":\\\"illegal_state_exception\\\",\\\"reason\\\":\\\"foobar\\\"},\\\"status\\\":500},{\\\"error\\\":{\\\"root_cause\\\":[{\\\"type\\\":\\\"illegal_state_exception\\\",\\\"reason\\\":\\\"baaaaaazzzz\\\"}],\\\"type\\\":\\\"illegal_state_exception\\\",\\\"reason\\\":\\\"baaaaaazzzz\\\"},\\\"status\\\":500}]\",\n+ builder.endObject();\n+\n+ assertEquals(\"{\\\"responses\\\":[\"\n+ + \"{\"\n+ + \"\\\"error\\\":{\\\"root_cause\\\":[{\\\"type\\\":\\\"illegal_state_exception\\\",\\\"reason\\\":\\\"foobar\\\"}],\"\n+ + 
\"\\\"type\\\":\\\"illegal_state_exception\\\",\\\"reason\\\":\\\"foobar\\\"},\\\"status\\\":500\"\n+ + \"},\"\n+ + \"{\"\n+ + \"\\\"error\\\":{\\\"root_cause\\\":[{\\\"type\\\":\\\"illegal_state_exception\\\",\\\"reason\\\":\\\"baaaaaazzzz\\\"}],\"\n+ + \"\\\"type\\\":\\\"illegal_state_exception\\\",\\\"reason\\\":\\\"baaaaaazzzz\\\"},\\\"status\\\":500\"\n+ + \"}\"\n+ + \"]}\",\n builder.string());\n }\n ", "filename": "core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java", "status": "modified" }, { "diff": "@@ -356,6 +356,7 @@ public void testWriteMapValueWithNullKeys() throws IOException {\n public void testWriteFieldMapWithNullKeys() throws IOException {\n XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));\n try {\n+ builder.startObject();\n builder.field(\"map\", Collections.singletonMap(null, \"test\"));\n fail(\"write map should have failed\");\n } catch(IllegalArgumentException e) {", "filename": "core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java", "status": "modified" }, { "diff": "@@ -319,9 +319,12 @@ public void testSearchAnalyzerSerialization() throws IOException {\n .endObject()\n .endObject().endObject().endObject().string();\n mapper = parser.parse(\"type\", new CompressedXContent(mapping));\n- XContentBuilder builder = XContentFactory.jsonBuilder();\n \n+ XContentBuilder builder = XContentFactory.jsonBuilder();\n+ builder.startObject();\n mapper.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(\"include_defaults\", \"true\")));\n+ builder.endObject();\n+\n String mappingString = builder.string();\n assertTrue(mappingString.contains(\"analyzer\"));\n assertTrue(mappingString.contains(\"search_analyzer\"));", "filename": "core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java", "status": "modified" }, { "diff": "@@ -337,9 +337,12 @@ public void testSearchAnalyzerSerialization() throws IOException {\n .endObject()\n .endObject().endObject().endObject().string();\n mapper = parser.parse(\"type\", new CompressedXContent(mapping));\n- XContentBuilder builder = XContentFactory.jsonBuilder();\n \n+ XContentBuilder builder = XContentFactory.jsonBuilder();\n+ builder.startObject();\n mapper.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(\"include_defaults\", \"true\")));\n+ builder.endObject();\n+\n String mappingString = builder.string();\n assertTrue(mappingString.contains(\"analyzer\"));\n assertTrue(mappingString.contains(\"search_analyzer\"));", "filename": "core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java", "status": "modified" }, { "diff": "@@ -42,6 +42,9 @@\n import org.elasticsearch.common.util.BigArrays;\n import org.elasticsearch.common.util.concurrent.AbstractRunnable;\n import org.elasticsearch.common.util.concurrent.ConcurrentCollections;\n+import org.elasticsearch.common.xcontent.ToXContent;\n+import org.elasticsearch.common.xcontent.XContentBuilder;\n+import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.index.VersionType;\n import org.elasticsearch.index.shard.ShardId;\n import org.elasticsearch.index.translog.Translog.Location;\n@@ -351,10 +354,14 @@ public void testStats() throws IOException {\n \n assertEquals(6, copy.estimatedNumberOfOperations());\n assertEquals(431, copy.getTranslogSizeInBytes());\n- assertEquals(\"\\\"translog\\\"{\\n\" +\n- \" \\\"operations\\\" : 6,\\n\" +\n- \" \\\"size_in_bytes\\\" : 431\\n\" +\n- 
\"}\", copy.toString().trim());\n+\n+ try (XContentBuilder builder = XContentFactory.jsonBuilder()) {\n+ builder.startObject();\n+ copy.toXContent(builder, ToXContent.EMPTY_PARAMS);\n+ builder.endObject();\n+\n+ assertEquals(\"{\\\"translog\\\":{\\\"operations\\\":6,\\\"size_in_bytes\\\":431}}\", builder.string());\n+ }\n \n try {\n new TranslogStats(1, -1);", "filename": "core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java", "status": "modified" }, { "diff": "@@ -109,9 +109,14 @@ private void comparePluginsAndModules(NodeInfo nodeInfo, NodeInfo readNodeInfo)\n private void compareJsonOutput(ToXContent param1, ToXContent param2) throws IOException {\n ToXContent.Params params = ToXContent.EMPTY_PARAMS;\n XContentBuilder param1Builder = jsonBuilder();\n- XContentBuilder param2Builder = jsonBuilder();\n+ param1Builder.startObject();\n param1.toXContent(param1Builder, params);\n+ param1Builder.endObject();\n+\n+ XContentBuilder param2Builder = jsonBuilder();\n+ param2Builder.startObject();\n param2.toXContent(param2Builder, params);\n+ param2Builder.endObject();\n assertThat(param1Builder.string(), equalTo(param2Builder.string()));\n }\n ", "filename": "core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java", "status": "modified" }, { "diff": "@@ -35,6 +35,7 @@\n import org.elasticsearch.script.Script;\n import org.elasticsearch.script.ScriptService.ScriptType;\n import org.elasticsearch.search.aggregations.Aggregation;\n+import org.elasticsearch.search.aggregations.Aggregations;\n import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter;\n import org.elasticsearch.search.aggregations.bucket.script.NativeSignificanceScoreScriptNoParams;\n import org.elasticsearch.search.aggregations.bucket.script.NativeSignificanceScoreScriptWithParams;\n@@ -116,7 +117,7 @@ public void testPlugin() throws Exception {\n .execute()\n .actionGet();\n assertSearchResponse(response);\n- StringTerms classes = (StringTerms) response.getAggregations().get(\"class\");\n+ StringTerms classes = response.getAggregations().get(\"class\");\n assertThat(classes.getBuckets().size(), equalTo(2));\n for (Terms.Bucket classBucket : classes.getBuckets()) {\n Map<String, Aggregation> aggs = classBucket.getAggregations().asMap();\n@@ -246,15 +247,15 @@ public static SignificanceHeuristic parse(XContentParser parser, ParseFieldMatch\n }\n \n public void testXContentResponse() throws Exception {\n- String type = false || randomBoolean() ? \"text\" : \"long\";\n+ String type = randomBoolean() ? 
\"text\" : \"long\";\n String settings = \"{\\\"index.number_of_shards\\\": 1, \\\"index.number_of_replicas\\\": 0}\";\n SharedSignificantTermsTestMethods.index01Docs(type, settings, this);\n SearchResponse response = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE)\n .addAggregation(terms(\"class\").field(CLASS_FIELD).subAggregation(significantTerms(\"sig_terms\").field(TEXT_FIELD)))\n .execute()\n .actionGet();\n assertSearchResponse(response);\n- StringTerms classes = (StringTerms) response.getAggregations().get(\"class\");\n+ StringTerms classes = response.getAggregations().get(\"class\");\n assertThat(classes.getBuckets().size(), equalTo(2));\n for (Terms.Bucket classBucket : classes.getBuckets()) {\n Map<String, Aggregation> aggs = classBucket.getAggregations().asMap();\n@@ -267,13 +268,39 @@ public void testXContentResponse() throws Exception {\n }\n \n XContentBuilder responseBuilder = XContentFactory.jsonBuilder();\n+ responseBuilder.startObject();\n classes.toXContent(responseBuilder, null);\n- String result = null;\n- if (type.equals(\"long\")) {\n- result = \"\\\"class\\\"{\\\"doc_count_error_upper_bound\\\":0,\\\"sum_other_doc_count\\\":0,\\\"buckets\\\":[{\\\"key\\\":\\\"0\\\",\\\"doc_count\\\":4,\\\"sig_terms\\\":{\\\"doc_count\\\":4,\\\"buckets\\\":[{\\\"key\\\":0,\\\"doc_count\\\":4,\\\"score\\\":0.39999999999999997,\\\"bg_count\\\":5}]}},{\\\"key\\\":\\\"1\\\",\\\"doc_count\\\":3,\\\"sig_terms\\\":{\\\"doc_count\\\":3,\\\"buckets\\\":[{\\\"key\\\":1,\\\"doc_count\\\":3,\\\"score\\\":0.75,\\\"bg_count\\\":4}]}}]}\";\n- } else {\n- result = \"\\\"class\\\"{\\\"doc_count_error_upper_bound\\\":0,\\\"sum_other_doc_count\\\":0,\\\"buckets\\\":[{\\\"key\\\":\\\"0\\\",\\\"doc_count\\\":4,\\\"sig_terms\\\":{\\\"doc_count\\\":4,\\\"buckets\\\":[{\\\"key\\\":\\\"0\\\",\\\"doc_count\\\":4,\\\"score\\\":0.39999999999999997,\\\"bg_count\\\":5}]}},{\\\"key\\\":\\\"1\\\",\\\"doc_count\\\":3,\\\"sig_terms\\\":{\\\"doc_count\\\":3,\\\"buckets\\\":[{\\\"key\\\":\\\"1\\\",\\\"doc_count\\\":3,\\\"score\\\":0.75,\\\"bg_count\\\":4}]}}]}\";\n- }\n+ responseBuilder.endObject();\n+\n+ String result = \"{\\\"class\\\":{\\\"doc_count_error_upper_bound\\\":0,\\\"sum_other_doc_count\\\":0,\"\n+ + \"\\\"buckets\\\":[\"\n+ + \"{\"\n+ + \"\\\"key\\\":\\\"0\\\",\"\n+ + \"\\\"doc_count\\\":4,\"\n+ + \"\\\"sig_terms\\\":{\"\n+ + \"\\\"doc_count\\\":4,\"\n+ + \"\\\"buckets\\\":[\"\n+ + \"{\"\n+ + \"\\\"key\\\":\" + (type.equals(\"long\") ? \"0,\" : \"\\\"0\\\",\")\n+ + \"\\\"doc_count\\\":4,\"\n+ + \"\\\"score\\\":0.39999999999999997,\"\n+ + \"\\\"bg_count\\\":5\"\n+ + \"}\"\n+ + \"]\"\n+ + \"}\"\n+ + \"},\"\n+ + \"{\"\n+ + \"\\\"key\\\":\\\"1\\\",\"\n+ + \"\\\"doc_count\\\":3,\"\n+ + \"\\\"sig_terms\\\":{\"\n+ + \"\\\"doc_count\\\":3,\"\n+ + \"\\\"buckets\\\":[\"\n+ + \"{\"\n+ + \"\\\"key\\\":\" + (type.equals(\"long\") ? \"1,\" : \"\\\"1\\\",\")\n+ + \"\\\"doc_count\\\":3,\"\n+ + \"\\\"score\\\":0.75,\"\n+ + \"\\\"bg_count\\\":4\"\n+ + \"}]}}]}}\";\n assertThat(responseBuilder.string(), equalTo(result));\n \n }\n@@ -309,7 +336,7 @@ public void testDeletesIssue7951() throws Exception {\n }\n indexRandom(true, false, indexRequestBuilderList);\n \n- SearchResponse response1 = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE)\n+ client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE)\n .addAggregation(\n terms(\"class\")\n .field(CLASS_FIELD)\n@@ -334,7 +361,8 @@ public void testBackgroundVsSeparateSet() throws Exception {\n // 1. terms agg on class and significant terms\n // 2. 
filter buckets and set the background to the other class and set is_background false\n // both should yield exact same result\n- public void testBackgroundVsSeparateSet(SignificanceHeuristic significanceHeuristicExpectingSuperset, SignificanceHeuristic significanceHeuristicExpectingSeparateSets) throws Exception {\n+ public void testBackgroundVsSeparateSet(SignificanceHeuristic significanceHeuristicExpectingSuperset,\n+ SignificanceHeuristic significanceHeuristicExpectingSeparateSets) throws Exception {\n \n SearchResponse response1 = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE)\n .addAggregation(terms(\"class\")\n@@ -364,18 +392,25 @@ public void testBackgroundVsSeparateSet(SignificanceHeuristic significanceHeuris\n .execute()\n .actionGet();\n \n- SignificantTerms sigTerms0 = ((SignificantTerms) (((StringTerms) response1.getAggregations().get(\"class\")).getBucketByKey(\"0\").getAggregations().asMap().get(\"sig_terms\")));\n+ StringTerms classes = response1.getAggregations().get(\"class\");\n+\n+ SignificantTerms sigTerms0 = ((SignificantTerms) (classes.getBucketByKey(\"0\").getAggregations().asMap().get(\"sig_terms\")));\n assertThat(sigTerms0.getBuckets().size(), equalTo(2));\n double score00Background = sigTerms0.getBucketByKey(\"0\").getSignificanceScore();\n double score01Background = sigTerms0.getBucketByKey(\"1\").getSignificanceScore();\n- SignificantTerms sigTerms1 = ((SignificantTerms) (((StringTerms) response1.getAggregations().get(\"class\")).getBucketByKey(\"1\").getAggregations().asMap().get(\"sig_terms\")));\n+ SignificantTerms sigTerms1 = ((SignificantTerms) (classes.getBucketByKey(\"1\").getAggregations().asMap().get(\"sig_terms\")));\n double score10Background = sigTerms1.getBucketByKey(\"0\").getSignificanceScore();\n double score11Background = sigTerms1.getBucketByKey(\"1\").getSignificanceScore();\n \n- double score00SeparateSets = ((SignificantTerms) ((InternalFilter) response2.getAggregations().get(\"0\")).getAggregations().getAsMap().get(\"sig_terms\")).getBucketByKey(\"0\").getSignificanceScore();\n- double score01SeparateSets = ((SignificantTerms) ((InternalFilter) response2.getAggregations().get(\"0\")).getAggregations().getAsMap().get(\"sig_terms\")).getBucketByKey(\"1\").getSignificanceScore();\n- double score10SeparateSets = ((SignificantTerms) ((InternalFilter) response2.getAggregations().get(\"1\")).getAggregations().getAsMap().get(\"sig_terms\")).getBucketByKey(\"0\").getSignificanceScore();\n- double score11SeparateSets = ((SignificantTerms) ((InternalFilter) response2.getAggregations().get(\"1\")).getAggregations().getAsMap().get(\"sig_terms\")).getBucketByKey(\"1\").getSignificanceScore();\n+ Aggregations aggs = response2.getAggregations();\n+\n+ sigTerms0 = (SignificantTerms) ((InternalFilter) aggs.get(\"0\")).getAggregations().getAsMap().get(\"sig_terms\");\n+ double score00SeparateSets = sigTerms0.getBucketByKey(\"0\").getSignificanceScore();\n+ double score01SeparateSets = sigTerms0.getBucketByKey(\"1\").getSignificanceScore();\n+\n+ sigTerms1 = (SignificantTerms) ((InternalFilter) aggs.get(\"1\")).getAggregations().getAsMap().get(\"sig_terms\");\n+ double score10SeparateSets = sigTerms1.getBucketByKey(\"0\").getSignificanceScore();\n+ double score11SeparateSets = sigTerms1.getBucketByKey(\"1\").getSignificanceScore();\n \n assertThat(score00Background, equalTo(score00SeparateSets));\n assertThat(score01Background, equalTo(score01SeparateSets));\n@@ -401,11 +436,15 @@ public void testScoresEqualForPositiveAndNegative(SignificanceHeuristic 
heuristi\n .execute()\n .actionGet();\n assertSearchResponse(response);\n- StringTerms classes = (StringTerms) response.getAggregations().get(\"class\");\n+ StringTerms classes = response.getAggregations().get(\"class\");\n assertThat(classes.getBuckets().size(), equalTo(2));\n Iterator<Terms.Bucket> classBuckets = classes.getBuckets().iterator();\n- Collection<SignificantTerms.Bucket> classA = ((SignificantTerms) classBuckets.next().getAggregations().get(\"mySignificantTerms\")).getBuckets();\n- Iterator<SignificantTerms.Bucket> classBBucketIterator = ((SignificantTerms) classBuckets.next().getAggregations().get(\"mySignificantTerms\")).getBuckets().iterator();\n+\n+ Aggregations aggregations = classBuckets.next().getAggregations();\n+ SignificantTerms sigTerms = aggregations.get(\"mySignificantTerms\");\n+\n+ Collection<SignificantTerms.Bucket> classA = sigTerms.getBuckets();\n+ Iterator<SignificantTerms.Bucket> classBBucketIterator = sigTerms.getBuckets().iterator();\n assertThat(classA.size(), greaterThan(0));\n for (SignificantTerms.Bucket classABucket : classA) {\n SignificantTerms.Bucket classBBucket = classBBucketIterator.next();\n@@ -462,8 +501,10 @@ public void testScriptScore() throws ExecutionException, InterruptedException, I\n .actionGet();\n assertSearchResponse(response);\n for (Terms.Bucket classBucket : ((Terms) response.getAggregations().get(\"class\")).getBuckets()) {\n- for (SignificantTerms.Bucket bucket : ((SignificantTerms) classBucket.getAggregations().get(\"mySignificantTerms\")).getBuckets()) {\n- assertThat(bucket.getSignificanceScore(), is((double) bucket.getSubsetDf() + bucket.getSubsetSize() + bucket.getSupersetDf() + bucket.getSupersetSize()));\n+ SignificantTerms sigTerms = classBucket.getAggregations().get(\"mySignificantTerms\");\n+ for (SignificantTerms.Bucket bucket : sigTerms.getBuckets()) {\n+ assertThat(bucket.getSignificanceScore(),\n+ is((double) bucket.getSubsetDf() + bucket.getSubsetSize() + bucket.getSupersetDf() + bucket.getSupersetSize()));\n }\n }\n }\n@@ -478,9 +519,7 @@ private ScriptHeuristic getScriptSignificanceHeuristic() throws IOException {\n } else {\n script = new Script(\"native_significance_score_script_no_params\", ScriptType.INLINE, \"native\", null);\n }\n- ScriptHeuristic scriptHeuristic = new ScriptHeuristic(script);\n-\n- return scriptHeuristic;\n+ return new ScriptHeuristic(script);\n }\n \n private void indexRandomFrequencies01(String type) throws ExecutionException, InterruptedException {", "filename": "core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java", "status": "modified" }, { "diff": "@@ -34,13 +34,15 @@\n import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBounds;\n import org.elasticsearch.search.internal.SearchContext;\n import org.elasticsearch.test.ESTestCase;\n+import org.elasticsearch.threadpool.ThreadPoolStats;\n import org.joda.time.DateTimeZone;\n import org.joda.time.Instant;\n \n import java.io.IOException;\n \n import static java.lang.Math.max;\n import static java.lang.Math.min;\n+import static org.hamcrest.Matchers.equalTo;\n import static org.mockito.Mockito.mock;\n import static org.mockito.Mockito.when;\n \n@@ -149,9 +151,21 @@ public void testXContentRoundTrip() throws Exception {\n ExtendedBounds orig = randomExtendedBounds();\n \n try (XContentBuilder out = JsonXContent.contentBuilder()) {\n+ out.startObject();\n orig.toXContent(out, ToXContent.EMPTY_PARAMS);\n+ out.endObject();\n+\n try (XContentParser in = 
JsonXContent.jsonXContent.createParser(out.bytes())) {\n- in.nextToken();\n+ XContentParser.Token token = in.currentToken();\n+ assertNull(token);\n+\n+ token = in.nextToken();\n+ assertThat(token, equalTo(XContentParser.Token.START_OBJECT));\n+\n+ token = in.nextToken();\n+ assertThat(token, equalTo(XContentParser.Token.FIELD_NAME));\n+ assertThat(in.currentName(), equalTo(ExtendedBounds.EXTENDED_BOUNDS_FIELD.getPreferredName()));\n+\n ExtendedBounds read = ExtendedBounds.PARSER.apply(in, () -> ParseFieldMatcher.STRICT);\n assertEquals(orig, read);\n } catch (Exception e) {", "filename": "core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java", "status": "modified" }, { "diff": "@@ -71,17 +71,23 @@ public void testThreadPoolStatsToXContent() throws IOException {\n stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.FORCE_MERGE, -1, 0, 0, 0, 0, 0L));\n stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SAME, -1, 0, 0, 0, 0, 0L));\n \n-\n+ ThreadPoolStats threadPoolStats = new ThreadPoolStats(stats);\n try (XContentBuilder builder = new XContentBuilder(XContentType.JSON.xContent(), os)) {\n- new ThreadPoolStats(stats).toXContent(builder, ToXContent.EMPTY_PARAMS);\n+ builder.startObject();\n+ threadPoolStats.toXContent(builder, ToXContent.EMPTY_PARAMS);\n+ builder.endObject();\n }\n \n try (XContentParser parser = XContentType.JSON.xContent().createParser(os.bytes())) {\n XContentParser.Token token = parser.currentToken();\n assertNull(token);\n \n token = parser.nextToken();\n- assertThat(token, equalTo(XContentParser.Token.VALUE_STRING));\n+ assertThat(token, equalTo(XContentParser.Token.START_OBJECT));\n+\n+ token = parser.nextToken();\n+ assertThat(token, equalTo(XContentParser.Token.FIELD_NAME));\n+ assertThat(parser.currentName(), equalTo(ThreadPoolStats.Fields.THREAD_POOL));\n \n token = parser.nextToken();\n assertThat(token, equalTo(XContentParser.Token.START_OBJECT));", "filename": "core/src/test/java/org/elasticsearch/threadpool/ThreadPoolStatsTests.java", "status": "modified" }, { "diff": "@@ -0,0 +1 @@\n+fd13b1c033741d48291315c6370f7d475a42dccf\n\\ No newline at end of file", "filename": "distribution/licenses/jackson-core-2.8.1.jar.sha1", "status": "added" }, { "diff": "@@ -0,0 +1 @@\n+3a6fb7e75c9972559a78cf5cfc5a48a41a13ea40\n\\ No newline at end of file", "filename": "distribution/licenses/jackson-dataformat-cbor-2.8.1.jar.sha1", "status": "added" }, { "diff": "@@ -0,0 +1 @@\n+005b73867bc12224946fc67fc8d49d9f5e698d7f\n\\ No newline at end of file", "filename": "distribution/licenses/jackson-dataformat-smile-2.8.1.jar.sha1", "status": "added" }, { "diff": "@@ -0,0 +1 @@\n+eb63166c723b0b4b9fb5298fca232a2f6612ec34\n\\ No newline at end of file", "filename": "distribution/licenses/jackson-dataformat-yaml-2.8.1.jar.sha1", "status": "added" }, { "diff": "@@ -0,0 +1 @@\n+3b132bea69e8ee099f416044970997bde80f4ea6\n\\ No newline at end of file", "filename": "distribution/licenses/snakeyaml-1.15.jar.sha1", "status": "added" }, { "diff": "@@ -0,0 +1,176 @@\n+ Apache License\n+ Version 2.0, January 2004\n+ http://www.apache.org/licenses/\n+\n+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n+\n+ 1. 
Definitions.\n+\n+ \"License\" shall mean the terms and conditions for use, reproduction,\n+ and distribution as defined by Sections 1 through 9 of this document.\n+\n+ \"Licensor\" shall mean the copyright owner or entity authorized by\n+ the copyright owner that is granting the License.\n+\n+ \"Legal Entity\" shall mean the union of the acting entity and all\n+ other entities that control, are controlled by, or are under common\n+ control with that entity. For the purposes of this definition,\n+ \"control\" means (i) the power, direct or indirect, to cause the\n+ direction or management of such entity, whether by contract or\n+ otherwise, or (ii) ownership of fifty percent (50%) or more of the\n+ outstanding shares, or (iii) beneficial ownership of such entity.\n+\n+ \"You\" (or \"Your\") shall mean an individual or Legal Entity\n+ exercising permissions granted by this License.\n+\n+ \"Source\" form shall mean the preferred form for making modifications,\n+ including but not limited to software source code, documentation\n+ source, and configuration files.\n+\n+ \"Object\" form shall mean any form resulting from mechanical\n+ transformation or translation of a Source form, including but\n+ not limited to compiled object code, generated documentation,\n+ and conversions to other media types.\n+\n+ \"Work\" shall mean the work of authorship, whether in Source or\n+ Object form, made available under the License, as indicated by a\n+ copyright notice that is included in or attached to the work\n+ (an example is provided in the Appendix below).\n+\n+ \"Derivative Works\" shall mean any work, whether in Source or Object\n+ form, that is based on (or derived from) the Work and for which the\n+ editorial revisions, annotations, elaborations, or other modifications\n+ represent, as a whole, an original work of authorship. For the purposes\n+ of this License, Derivative Works shall not include works that remain\n+ separable from, or merely link (or bind by name) to the interfaces of,\n+ the Work and Derivative Works thereof.\n+\n+ \"Contribution\" shall mean any work of authorship, including\n+ the original version of the Work and any modifications or additions\n+ to that Work or Derivative Works thereof, that is intentionally\n+ submitted to Licensor for inclusion in the Work by the copyright owner\n+ or by an individual or Legal Entity authorized to submit on behalf of\n+ the copyright owner. For the purposes of this definition, \"submitted\"\n+ means any form of electronic, verbal, or written communication sent\n+ to the Licensor or its representatives, including but not limited to\n+ communication on electronic mailing lists, source code control systems,\n+ and issue tracking systems that are managed by, or on behalf of, the\n+ Licensor for the purpose of discussing and improving the Work, but\n+ excluding communication that is conspicuously marked or otherwise\n+ designated in writing by the copyright owner as \"Not a Contribution.\"\n+\n+ \"Contributor\" shall mean Licensor and any individual or Legal Entity\n+ on behalf of whom a Contribution has been received by Licensor and\n+ subsequently incorporated within the Work.\n+\n+ 2. Grant of Copyright License. 
Subject to the terms and conditions of\n+ this License, each Contributor hereby grants to You a perpetual,\n+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n+ copyright license to reproduce, prepare Derivative Works of,\n+ publicly display, publicly perform, sublicense, and distribute the\n+ Work and such Derivative Works in Source or Object form.\n+\n+ 3. Grant of Patent License. Subject to the terms and conditions of\n+ this License, each Contributor hereby grants to You a perpetual,\n+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n+ (except as stated in this section) patent license to make, have made,\n+ use, offer to sell, sell, import, and otherwise transfer the Work,\n+ where such license applies only to those patent claims licensable\n+ by such Contributor that are necessarily infringed by their\n+ Contribution(s) alone or by combination of their Contribution(s)\n+ with the Work to which such Contribution(s) was submitted. If You\n+ institute patent litigation against any entity (including a\n+ cross-claim or counterclaim in a lawsuit) alleging that the Work\n+ or a Contribution incorporated within the Work constitutes direct\n+ or contributory patent infringement, then any patent licenses\n+ granted to You under this License for that Work shall terminate\n+ as of the date such litigation is filed.\n+\n+ 4. Redistribution. You may reproduce and distribute copies of the\n+ Work or Derivative Works thereof in any medium, with or without\n+ modifications, and in Source or Object form, provided that You\n+ meet the following conditions:\n+\n+ (a) You must give any other recipients of the Work or\n+ Derivative Works a copy of this License; and\n+\n+ (b) You must cause any modified files to carry prominent notices\n+ stating that You changed the files; and\n+\n+ (c) You must retain, in the Source form of any Derivative Works\n+ that You distribute, all copyright, patent, trademark, and\n+ attribution notices from the Source form of the Work,\n+ excluding those notices that do not pertain to any part of\n+ the Derivative Works; and\n+\n+ (d) If the Work includes a \"NOTICE\" text file as part of its\n+ distribution, then any Derivative Works that You distribute must\n+ include a readable copy of the attribution notices contained\n+ within such NOTICE file, excluding those notices that do not\n+ pertain to any part of the Derivative Works, in at least one\n+ of the following places: within a NOTICE text file distributed\n+ as part of the Derivative Works; within the Source form or\n+ documentation, if provided along with the Derivative Works; or,\n+ within a display generated by the Derivative Works, if and\n+ wherever such third-party notices normally appear. The contents\n+ of the NOTICE file are for informational purposes only and\n+ do not modify the License. You may add Your own attribution\n+ notices within Derivative Works that You distribute, alongside\n+ or as an addendum to the NOTICE text from the Work, provided\n+ that such additional attribution notices cannot be construed\n+ as modifying the License.\n+\n+ You may add Your own copyright statement to Your modifications and\n+ may provide additional or different license terms and conditions\n+ for use, reproduction, or distribution of Your modifications, or\n+ for any such Derivative Works as a whole, provided Your use,\n+ reproduction, and distribution of the Work otherwise complies with\n+ the conditions stated in this License.\n+\n+ 5. Submission of Contributions. 
Unless You explicitly state otherwise,\n+ any Contribution intentionally submitted for inclusion in the Work\n+ by You to the Licensor shall be under the terms and conditions of\n+ this License, without any additional terms or conditions.\n+ Notwithstanding the above, nothing herein shall supersede or modify\n+ the terms of any separate license agreement you may have executed\n+ with Licensor regarding such Contributions.\n+\n+ 6. Trademarks. This License does not grant permission to use the trade\n+ names, trademarks, service marks, or product names of the Licensor,\n+ except as required for reasonable and customary use in describing the\n+ origin of the Work and reproducing the content of the NOTICE file.\n+\n+ 7. Disclaimer of Warranty. Unless required by applicable law or\n+ agreed to in writing, Licensor provides the Work (and each\n+ Contributor provides its Contributions) on an \"AS IS\" BASIS,\n+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n+ implied, including, without limitation, any warranties or conditions\n+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n+ PARTICULAR PURPOSE. You are solely responsible for determining the\n+ appropriateness of using or redistributing the Work and assume any\n+ risks associated with Your exercise of permissions under this License.\n+\n+ 8. Limitation of Liability. In no event and under no legal theory,\n+ whether in tort (including negligence), contract, or otherwise,\n+ unless required by applicable law (such as deliberate and grossly\n+ negligent acts) or agreed to in writing, shall any Contributor be\n+ liable to You for damages, including any direct, indirect, special,\n+ incidental, or consequential damages of any character arising as a\n+ result of this License or out of the use or inability to use the\n+ Work (including but not limited to damages for loss of goodwill,\n+ work stoppage, computer failure or malfunction, or any and all\n+ other commercial damages or losses), even if such Contributor\n+ has been advised of the possibility of such damages.\n+\n+ 9. Accepting Warranty or Additional Liability. While redistributing\n+ the Work or Derivative Works thereof, You may choose to offer,\n+ and charge a fee for, acceptance of support, warranty, indemnity,\n+ or other liability obligations and/or rights consistent with this\n+ License. However, in accepting such obligations, You may act only\n+ on Your own behalf and on Your sole responsibility, not on behalf\n+ of any other Contributor, and only if You agree to indemnify,\n+ defend, and hold each Contributor harmless for any liability\n+ incurred by, or claims asserted against, such Contributor by reason\n+ of your accepting any such warranty or additional liability.\n+\n+ END OF TERMS AND CONDITIONS", "filename": "distribution/licenses/snakeyaml-LICENSE.txt", "status": "added" }, { "diff": "@@ -0,0 +1,24 @@\n+***The art of simplicity is a puzzle of complexity.***\n+\n+## Overview ##\n+[YAML](http://yaml.org) is a data serialization format designed for human readability and interaction with scripting languages.\n+\n+SnakeYAML is a YAML processor for the Java Virtual Machine.\n+\n+## SnakeYAML features ##\n+\n+* a **complete** [YAML 1.1 processor](http://yaml.org/spec/1.1/current.html). 
In particular, SnakeYAML can parse all examples from the specification.\n+* Unicode support including UTF-8/UTF-16 input/output.\n+* high-level API for serializing and deserializing native Java objects.\n+* support for all types from the [YAML types repository](http://yaml.org/type/index.html).\n+* relatively sensible error messages.\n+\n+## Info ##\n+ * [Changes](https://bitbucket.org/asomov/snakeyaml/wiki/Changes)\n+ * [Documentation](https://bitbucket.org/asomov/snakeyaml/wiki/Documentation)\n+\n+## Contribute ##\n+* Mercurial DVCS is used to dance with the [source code](https://bitbucket.org/asomov/snakeyaml/src).\n+* If you find a bug in SnakeYAML, please [file a bug report](https://bitbucket.org/asomov/snakeyaml/issues?status=new&status=open).\n+* You may discuss SnakeYAML at\n+[the mailing list](http://groups.google.com/group/snakeyaml-core).\n\\ No newline at end of file", "filename": "distribution/licenses/snakeyaml-NOTICE.txt", "status": "added" }, { "diff": "@@ -98,7 +98,7 @@ setup:\n query_type: \"unknown\"\n \n - match: { responses.0.hits.total: 2 }\n- - match: { responses.1.error.root_cause.0.type: json_parse_exception }\n+ - match: { responses.1.error.root_cause.0.type: json_e_o_f_exception }\n - match: { responses.1.error.root_cause.0.reason: \"/Unexpected.end.of.input/\" }\n - match: { responses.2.hits.total: 1 }\n - match: { responses.3.error.root_cause.0.type: parsing_exception }", "filename": "modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/50_multi_search_template.yaml", "status": "modified" } ] }
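Since the NOTICE above only describes SnakeYAML's high-level API in prose, here is a minimal, hedged sketch of what loading and dumping native Java objects with it looks like. The `Yaml` class and its `load`/`dump` methods are SnakeYAML's public entry points; the sample YAML document and variable names are invented for illustration.

```java
import org.yaml.snakeyaml.Yaml;

// Minimal illustration of the "high-level API for serializing and deserializing
// native Java objects" mentioned in the NOTICE above. The sample document is invented.
public class SnakeYamlDemo {
    public static void main(String[] args) {
        Yaml yaml = new Yaml();

        // Deserialize a YAML document into plain Java collections (maps, lists, scalars).
        Object doc = yaml.load("cluster:\n  name: demo\n  nodes: 3\n");
        System.out.println(doc); // roughly: {cluster={name=demo, nodes=3}}

        // Serialize the Java object graph back to a YAML string.
        System.out.println(yaml.dump(doc));
    }
}
```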
{ "body": "https://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+master+java9-periodic/256/consoleText\n\nThis looks like a fairly minimal reproduction:\n\n```\n public void testAppendStringIntoMap() {\n assertEquals(\"nullcat\", exec(\"def a = new HashMap(); a.cat += 'cat'\"));\n }\n```\n\nI'm not super familiar with this code. Does someone else want it or should I grab it?\n", "comments": [ { "body": "I'm going to disable the java-9 indy string concat stuff until this is fixed because it is causing build failures on java-9. Without the indy thing no ArrayIndexOutOfBoundsException....\n", "created_at": "2016-06-16T19:03:45Z" }, { "body": "I pushed 13d16fbf41e0cf90c9051e015681cc2198bb0334 which disables java-9's indy string work and adds a unit test that fails with it enabled (basically just the snippet in the description).\n", "created_at": "2016-06-16T19:10:53Z" }, { "body": "This is a bug in above code, it should not even trigger string concats! It is just a side effect that it works.\n", "created_at": "2016-06-16T19:54:29Z" }, { "body": "Whats the exception you get on this?\n", "created_at": "2016-06-16T19:56:21Z" }, { "body": "Can somebody please update this outdated java version?\n", "created_at": "2016-06-16T20:01:39Z" }, { "body": "We need to first rint the bytecode. AIIOBE always happens when you fck up the stack frames.\n", "created_at": "2016-06-16T20:08:00Z" }, { "body": "> Can somebody please update this outdated java version?\n\nI'll poke the right folks for that.\n", "created_at": "2016-06-16T20:16:49Z" }, { "body": "> I'll poke the right folks for that.\n\nDone. It still happens for 122 as well.\n", "created_at": "2016-06-16T20:18:27Z" }, { "body": "> Done. It still happens for 122 as well.\n\nOr to be clear - I've poked the right people. They haven't upgraded yet.\n", "created_at": "2016-06-16T20:18:45Z" }, { "body": "> Done. It still happens for 122 as well.\n\nYeah, that was just a side note. See above, the problem is a bug in compound statements. We are just lucky that Java 8 does not fail (because stack keeps valid).\n", "created_at": "2016-06-16T20:22:59Z" }, { "body": "I think the problem here is that the code in EChain does not correctly set cat=true, because the first arg is not actually a string. I think we only have tests for \"string\" + blabla, but never anyothertype + \"xxx\"\n", "created_at": "2016-06-16T20:26:14Z" }, { "body": "I see this if i print bytecode up to this point. 
Yes I am hooking this in so it will be easier!\n\n```\n 1> hit exception during compile, bytecode so far: \n 1> // class version 52.0 (52)\n 1> // access flags 0x31\n 1> public final class org/elasticsearch/painless/Executable$Script extends org/elasticsearch/painless/Executable {\n 1> // compiled from: <debugging>\n 1> // access flags 0x1\n 1> public <init>(Ljava/lang/String;Ljava/lang/String;Ljava/util/BitSet;)V\n 1> ALOAD 0\n 1> ALOAD 1\n 1> ALOAD 2\n 1> ALOAD 3\n 1> INVOKESPECIAL org/elasticsearch/painless/Executable.<init> (Ljava/lang/String;Ljava/lang/String;Ljava/util/BitSet;)V\n 1> RETURN\n 1> MAXSTACK = 0\n 1> MAXLOCALS = 0\n 1> // access flags 0x1\n 1> public execute(Ljava/util/Map;Lorg/apache/lucene/search/Scorer;Lorg/elasticsearch/search/lookup/LeafDocLookup;Ljava/lang/Object;)Ljava/lang/Object;\n 1> L0\n 1> LINENUMBER 9 L0\n 1> L1\n 1> LINENUMBER 9 L1\n 1> NEW java/util/HashMap\n 1> DUP\n 1> INVOKESPECIAL java/util/HashMap.<init> ()V\n 1> L2\n 1> LINENUMBER 9 L2\n 1> ASTORE 5\n 1> L3\n 1> LINENUMBER 24 L3\n 1> ALOAD 5\n 1> DUP_X1\n 1> L4\n 1> LINENUMBER 25 L4\n 1> INVOKEDYNAMIC cat(Ljava/lang/Object;)Ljava/lang/Object; [\n 1> // handle kind 0x6 : INVOKESTATIC\n 1> org/elasticsearch/painless/DefBootstrap.bootstrap(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;I[Ljava/lang/Object;)Ljava/lang/invoke/CallSite;\n 1> // arguments:\n 1> 1\n 1> ]\n 1> LDC \"cat\"\n 1> INVOKEDYNAMIC concat(Ljava/lang/Object;Ljava/lang/String;)Ljava/lang/String; [\n 1> // handle kind 0x6 : INVOKESTATIC\n 1> java/lang/invoke/StringConcatFactory.makeConcat(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/CallSite;\n 1> // arguments: none\n 1> ]\n 1> DUP_X1\n 1> L5\n 1> LINENUMBER 25 L5\n 1> INVOKEDYNAMIC cat(Ljava/lang/Object;Ljava/lang/Object;)V [\n 1> // handle kind 0x6 : INVOKESTATIC\n 1> org/elasticsearch/painless/DefBootstrap.bootstrap(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;I[Ljava/lang/Object;)Ljava/lang/invoke/CallSite;\n 1> // arguments:\n 1> 2\n 1> ]\n 1> L6\n 1> LINENUMBER 24 L6\n 1> ARETURN\n 1> MAXSTACK = 0\n 1> MAXLOCALS = 0\n```\n", "created_at": "2016-06-16T21:13:48Z" }, { "body": "How does Java 8 look like?\n", "created_at": "2016-06-16T21:16:33Z" }, { "body": "```\n// class version 52.0 (52)\n// access flags 0x31\npublic final class org/elasticsearch/painless/Executable$Script extends org/elasticsearch/painless/Executable {\n\n // compiled from: <debugging>\n\n // access flags 0x1\n public <init>(Ljava/lang/String;Ljava/lang/String;Ljava/util/BitSet;)V\n ALOAD 0\n ALOAD 1\n ALOAD 2\n ALOAD 3\n INVOKESPECIAL org/elasticsearch/painless/Executable.<init> (Ljava/lang/String;Ljava/lang/String;Ljava/util/BitSet;)V\n RETURN\n MAXSTACK = 4\n MAXLOCALS = 4\n\n // access flags 0x1\n public execute(Ljava/util/Map;Lorg/apache/lucene/search/Scorer;Lorg/elasticsearch/search/lookup/LeafDocLookup;Ljava/lang/Object;)Ljava/lang/Object;\n L0\n LINENUMBER 9 L0\n LINENUMBER 9 L0\n NEW java/util/HashMap\n DUP\n INVOKESPECIAL java/util/HashMap.<init> ()V\n L1\n LINENUMBER 9 L1\n ASTORE 5\n L2\n LINENUMBER 24 L2\n NEW java/lang/StringBuilder\n DUP\n INVOKESPECIAL java/lang/StringBuilder.<init> ()V\n ALOAD 5\n DUP_X1\n L3\n LINENUMBER 25 L3\n INVOKEDYNAMIC cat(Ljava/lang/Object;)Ljava/lang/Object; [\n // handle kind 0x6 : INVOKESTATIC\n 
org/elasticsearch/painless/DefBootstrap.bootstrap(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;I[Ljava/lang/Object;)Ljava/lang/invoke/CallSite;\n // arguments:\n 1\n ]\n INVOKEVIRTUAL java/lang/StringBuilder.append (Ljava/lang/Object;)Ljava/lang/StringBuilder;\n LDC \"cat\"\n INVOKEVIRTUAL java/lang/StringBuilder.append (Ljava/lang/String;)Ljava/lang/StringBuilder;\n INVOKEVIRTUAL java/lang/StringBuilder.toString ()Ljava/lang/String;\n DUP_X1\n L4\n LINENUMBER 25 L4\n INVOKEDYNAMIC cat(Ljava/lang/Object;Ljava/lang/Object;)V [\n // handle kind 0x6 : INVOKESTATIC\n org/elasticsearch/painless/DefBootstrap.bootstrap(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;I[Ljava/lang/Object;)Ljava/lang/invoke/CallSite;\n // arguments:\n 2\n ]\n L5\n LINENUMBER 24 L5\n ARETURN\n MAXSTACK = 3\n MAXLOCALS = 6\n}\n```\n", "created_at": "2016-06-16T21:20:33Z" }, { "body": "Hi,\nI think I know the problem:\nThe Java 8 old code assumes while duplicating stack arguments that there is a stringbuilder on stack. But for Java 9, it is not there. So the \"dup_x1\" after the \"aload 5\" duplicates the wrong stack item.\n\nI have to look at the generating code ton find a way how to handle this correctly. In fact we duplicate the wronmg item. For indy string concats a plain \"dup\" would be correct.\n", "created_at": "2016-06-16T21:33:59Z" }, { "body": "I opened #18932 with the Debugger improvement.\n", "created_at": "2016-06-16T21:38:21Z" }, { "body": "The bug is in EChain#write: \n\n``` java\nwriter.writeDup(link.size, 1); // dup the StringBuilder\n```\n\nSee the comment. The second arguments must be 0 for indy string concats. Not sure how to fix this.\n", "created_at": "2016-06-16T21:38:28Z" }, { "body": "Yes, thats the fix. all tests pass with that on java 9. we just have to conditionally do it.\n", "created_at": "2016-06-16T21:45:39Z" }, { "body": "I have to verify the other parts doing concats, too. The compound code is hairy (and very complicated), and we had stack bugs before, too.\n\nThe second problem with indy string concats is: You pop a lot of stuff onto stack, while with Java 8 you pop one item and call append. In Java 9 you have no chance to look back in stack unless you record sizes - but then bytecode does not allow it.\n", "created_at": "2016-06-16T21:51:01Z" }, { "body": "EBinary looks fine, no crazy stack duping. So its only EChain that hits the issue. I think we can easily fix this:\n- I let writeNewStrings return an int (the actual size of element it pushed), for java 8 it returns 1, with indy concats just 0. This is the size of the element on stack used for the concats\n- we just use return value in the dup instruction\n", "created_at": "2016-06-16T21:55:44Z" }, { "body": "I added a PR (see above).\n", "created_at": "2016-06-16T22:32:24Z" } ], "number": 18929, "title": "Painless: String += fails with map in java 8" }
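As a side note for readers following the discussion above, the expected result `"nullcat"` is just ordinary Java string-concatenation semantics applied to a missing map entry. The following standalone snippet (plain Java, not Painless or Elasticsearch code) shows what the generated bytecode ultimately has to reproduce:

```java
import java.util.HashMap;
import java.util.Map;

// Plain-Java equivalent of the Painless snippet `def a = new HashMap(); a.cat += 'cat'`:
// read the current value, concatenate, write it back.
public class CompoundConcatSemantics {
    public static void main(String[] args) {
        Map<String, Object> a = new HashMap<>();
        Object current = a.get("cat");     // null on the first assignment
        String updated = current + "cat";  // concatenating null yields "nullcat"
        a.put("cat", updated);
        System.out.println(a.get("cat"));  // prints "nullcat", matching the expected test result
    }
}
```

The bug discussed above was never about these semantics but about how the compound assignment is lowered to bytecode, which the PR that follows addresses.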
{ "body": "in Java 9 there is no stringbuilder on stack! This closes #18929\n", "number": 18933, "review_comments": [], "title": "Fix compound assignment with string concats" }
{ "commits": [ { "message": "Fix compound assignment with string concats. in Java 9 there is no stringbuilder on stack! This closes #18929" } ], "files": [ { "diff": "@@ -209,15 +209,20 @@ public void writeBranch(final Label tru, final Label fals) {\n }\n }\n \n- public void writeNewStrings() {\n+ /** Starts a new string concat.\n+ * @return the size of arguments pushed to stack (the object that does string concats, e.g. a StringBuilder)\n+ */\n+ public int writeNewStrings() {\n if (INDY_STRING_CONCAT_BOOTSTRAP_HANDLE != null) {\n // Java 9+: we just push our argument collector onto deque\n stringConcatArgs.push(new ArrayList<>());\n+ return 0; // nothing added to stack\n } else {\n // Java 8: create a StringBuilder in bytecode\n newInstance(STRINGBUILDER_TYPE);\n dup();\n invokeConstructor(STRINGBUILDER_TYPE, STRINGBUILDER_CONSTRUCTOR);\n+ return 1; // StringBuilder on stack\n }\n }\n ", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java", "status": "modified" }, { "diff": "@@ -131,7 +131,7 @@ public final class WriterConstants {\n // not Java 9 - we set it null, so MethodWriter uses StringBuilder:\n bs = null;\n }\n- INDY_STRING_CONCAT_BOOTSTRAP_HANDLE = null; // Disabled until https://github.com/elastic/elasticsearch/issues/18929\n+ INDY_STRING_CONCAT_BOOTSTRAP_HANDLE = bs;\n }\n \n public final static int MAX_INDY_STRING_CONCAT_ARGS = 200;", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java", "status": "modified" }, { "diff": "@@ -294,8 +294,9 @@ void write(MethodWriter writer) {\n // track types going onto the stack. This must be done before the\n // links in the chain are read because we need the StringBuilder to\n // be placed on the stack ahead of any potential concatenation arguments.\n+ int catElementStackSize = 0;\n if (cat) {\n- writer.writeNewStrings();\n+ catElementStackSize = writer.writeNewStrings();\n }\n \n ALink last = links.get(links.size() - 1);\n@@ -312,7 +313,7 @@ void write(MethodWriter writer) {\n // Handle the case where we are doing a compound assignment\n // representing a String concatenation.\n \n- writer.writeDup(link.size, 1); // dup the StringBuilder\n+ writer.writeDup(link.size, catElementStackSize); // dup the top element and insert it before concat helper on stack\n link.load(writer); // read the current link's value\n writer.writeAppendStrings(link.after); // append the link's value using the StringBuilder\n \n@@ -323,7 +324,7 @@ void write(MethodWriter writer) {\n writer.writeAppendStrings(expression.actual); // append the expression's value unless it's also a concatenation\n }\n \n- writer.writeToStrings(); // put the value of the StringBuilder on the stack\n+ writer.writeToStrings(); // put the value for string concat onto the stack\n writer.writeCast(back); // if necessary, cast the String to the lhs actual type\n \n if (link.load) {", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EChain.java", "status": "modified" } ] }
{ "body": "If a plugin does not exist, then this is returned\n\n```\nbin/elasticsearch-plugin install nonexistentplugin\n-> Downloading nonexistentplugin\nException in thread \"main\" java.net.MalformedURLException: no protocol: nonexistentplugin\n at java.net.URL.<init>(URL.java:586)\n at java.net.URL.<init>(URL.java:483)\n at java.net.URL.<init>(URL.java:432)\n at org.elasticsearch.plugins.InstallPluginCommand.downloadZip(InstallPluginCommand.java:211)\n at org.elasticsearch.plugins.InstallPluginCommand.download(InstallPluginCommand.java:206)\n at org.elasticsearch.plugins.InstallPluginCommand.execute(InstallPluginCommand.java:174)\n at org.elasticsearch.plugins.InstallPluginCommand.execute(InstallPluginCommand.java:162)\n at org.elasticsearch.cli.Command.mainWithoutErrorHandling(Command.java:88)\n at org.elasticsearch.cli.MultiCommand.execute(MultiCommand.java:69)\n at org.elasticsearch.cli.Command.mainWithoutErrorHandling(Command.java:88)\n at org.elasticsearch.cli.Command.main(Command.java:53)\n at org.elasticsearch.plugins.PluginCli.main(PluginCli.java:57)\n```\n\nAlso I cant seem to find out the URL which is actually requested for debugging purposes anymore, might be intended though?\n", "comments": [ { "body": "`nonexistentplugin` _is_ the url the plugin cli is trying to resolve. The logic is simple (1) is it a known official plugin? (2) is it maven coordinates? (3) finally, try it as a url.\n", "created_at": "2016-03-21T17:36:28Z" }, { "body": "If we want this to be \"nicer\" we could print a message in this final case on the usage info about what is supported (official plugin name, maven coordinates, or url).\n", "created_at": "2016-03-21T17:45:51Z" }, { "body": "> If we want this to be \"nicer\" we could print a message in this final case on the usage info about what is supported (official plugin name, maven coordinates, or url).\n\n++ that was the purpose of this issue I believe. The logic is clear to developers, but the current error is confusing to users.\n", "created_at": "2016-03-21T17:53:28Z" }, { "body": "> The logic is clear to developers, but the current error is confusing to users.\n\nI suspect this mostly comes up in the \"I misspelled kuromoji\" kind of use case.\n", "created_at": "2016-03-21T18:11:42Z" }, { "body": "Perhaps we should improve the usage information, and throw a UserError with USAGE here then. This way we don't have to repeat in this case, and in the general help.\n", "created_at": "2016-03-21T18:14:11Z" }, { "body": "I think need to show message like follows:\n\n<pre>\n$elasticsearch-plugin install lipsum\nNot found as core plugin.\nNot found as maven artifact.\nNot found as URL.\nUsages:\ninstall name\ninstall groupId:artifactId:version\ninstall file:///name.zip\n</pre>\n\nWhy github isn't supported now (5.x)?\n", "created_at": "2016-04-27T21:49:07Z" }, { "body": "> Why github isn't supported now (5.x)?\n\nSite plugins are no longer supported, only Java plugins. Site plugins should be reimplemented as Kibana plugins.\n", "created_at": "2016-04-29T08:34:14Z" }, { "body": "It's easy enough to figure out whether the argument looks like a protocol or not, and it would be much friendlier to say \"unknown plugin\" in this case than \"no protocol: foo\". Also, the current usage method doesn't mention that you can pass a URL or maven coordinates to install.\n", "created_at": "2016-05-05T07:28:53Z" }, { "body": "I opened #18876 to give a nicer error message. 
It does not yet force listing out all the acceptable patterns for installing, but that is a broader issue I think we need to address with real usage lines.\n", "created_at": "2016-06-15T03:07:13Z" } ], "number": 17226, "title": "PluginManager: Installing non-existing plugin returns confusing error message" }
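For context on the stack trace in the report above, the confusing message comes directly from `java.net.URL`: any argument without a scheme fails with "no protocol" before any download is attempted. A tiny standalone check (not Elasticsearch code; the zip path is hypothetical) reproduces both sides:

```java
import java.net.MalformedURLException;
import java.net.URL;

// Standalone reproduction of where the confusing "no protocol" message originates.
public class UrlParseDemo {
    public static void main(String[] args) {
        try {
            new URL("nonexistentplugin"); // no scheme at all
        } catch (MalformedURLException e) {
            System.out.println(e.getMessage()); // "no protocol: nonexistentplugin"
        }
        try {
            URL ok = new URL("file:///tmp/my-plugin.zip"); // hypothetical path, parses fine
            System.out.println(ok.getProtocol());          // "file"
        } catch (MalformedURLException e) {
            throw new AssertionError(e);
        }
    }
}
```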
{ "body": "When installing plugins, we first try the elastic download service for\nofficial plugins, then try maven coordinates, and finally try the\nargument as a url. This can lead to confusing error messages about\nunknown protocols when eg an official plugin name is mispelled. This\nchange adds a heuristic for determining if the argument in the final\ncase is in fact a url that we should try, and gives a simplified error\nmessage in the case it is definitely not a url.\n\ncloses #17226\n", "number": 18876, "review_comments": [ { "body": "I used to use file:/path/to/file.zip instead of file://\nNot sure if it respects standards but it works.\n", "created_at": "2016-06-15T05:37:21Z" }, { "body": "Also I use sometimes (in workshops) file:../relative/path\n", "created_at": "2016-06-15T05:38:18Z" }, { "body": " I can change the check to look for \":/\"\n", "created_at": "2016-06-15T05:50:54Z" }, { "body": "Actually I reverted this. As I thought, file URLs still must contain double slash (see https://en.wikipedia.org/wiki/File_URI_scheme for example). While java URL will parse a URL with a single slash, it doesn't actually work when trying to read the file.\n", "created_at": "2016-06-15T06:44:25Z" }, { "body": "Fair enough. I know it used to work in previous version but I'm fine with this implementation.\nAnd even better it will be consistent with Kibana plugin manager which also checks that `://` exists.\n", "created_at": "2016-06-15T08:15:13Z" } ], "title": "Emit nicer error message when trying to install unknown plugin" }
{ "commits": [ { "message": "Plugins: Emit nicer error message when trying to install unknown plugin\n\nWhen installing plugins, we first try the elastic download service for\nofficial plugins, then try maven coordinates, and finally try the\nargument as a url. This can lead to confusing error messages about\nunknown protocols when eg an official plugin name is mispelled. This\nchange adds a heuristic for determining if the argument in the final\ncase is in fact a url that we should try, and gives a simplified error\nmessage in the case it is definitely not a url.\n\ncloses #17226" }, { "message": "Add test for plugin install heuristic" } ], "files": [ { "diff": "@@ -237,6 +237,10 @@ private Path download(Terminal terminal, String pluginId, Path tmpDir) throws Ex\n }\n \n // fall back to plain old URL\n+ if (pluginId.contains(\"://\") == false) {\n+ // definitely not a valid url, so assume it is a plugin name\n+ throw new UserError(ExitCodes.USAGE, \"Unknown plugin \" + pluginId);\n+ }\n terminal.println(\"-> Downloading \" + URLDecoder.decode(pluginId, \"UTF-8\"));\n return downloadZip(terminal, pluginId, tmpDir);\n }", "filename": "core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java", "status": "modified" }, { "diff": "@@ -307,6 +307,12 @@ public void testMalformedUrlNotMaven() throws Exception {\n assertTrue(e.getMessage(), e.getMessage().contains(\"no protocol\"));\n }\n \n+ public void testUnknownPlugin() throws Exception {\n+ Tuple<Path, Environment> env = createEnv(fs, temp);\n+ UserError e = expectThrows(UserError.class, () -> installPlugin(\"foo\", env.v1()));\n+ assertTrue(e.getMessage(), e.getMessage().contains(\"Unknown plugin foo\"));\n+ }\n+\n public void testPluginsDirMissing() throws Exception {\n Tuple<Path, Environment> env = createEnv(fs, temp);\n Files.delete(env.v2().pluginsFile());", "filename": "qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java", "status": "modified" } ] }
{ "body": "**Elasticsearch version**:\n5.0.0. alpha 3\n**JVM version**:\n1.8.0-92\n**OS version**:\nUbuntu 3.16.0-71-lowlatency\n**Description of the problem including expected versus actual behavior**:\nTrying to install plugins with elasticsearch-plugin command, Checking new syntax with out any option or help gives error below:\n**Steps to reproduce**:\n just run command\n\n```\n./elasticsearch-plugin\n```\n\n**Provide logs (if relevant)**:\n\n```\n/usr/share/elasticsearch/bin# ./elasticsearch-plugin\nA tool for managing installed elasticsearch plugins\n\nCommands\n--------\nlist - Lists installed elasticsearch plugins\ninstall - Install a plugin\nremove - Removes a plugin from elasticsearch\n\nNon-option arguments:\ncommand\n\nOption Description\n------ -----------\n-h, --help show help\n-s, --silent show minimal output\n-v, --verbose show verbose output\n_**ERROR: E is not a recognized option**_\n\n```\n\n<!--\nIf you are filing a feature request, please remove the above bug\nreport block and provide responses for all of the below items.\n-->\n\n**Describe the feature**:\n", "comments": [ { "body": "This is because you have not specified a command, one of `list`, `install`, or `remove`. The reason for the error message is because the plugin script passes something like `-Edefault.path.conf=/Users/jason/elasticsearch/elasticsearch-5.0.0-alpha3/config` to the `org.elasticsearch.plugins.PluginCli` Java class as a program argument. When you specify a command, this argument will trickle down to the appropriate sub-command. However, since you have not, `org.elasticsearch.plugins.PluginCli` itself is attempting to parse, but this command-line argument does not mean anything to the top-level class. I'm afraid there is not an issue here, but please let us know if you think otherwise?\n", "created_at": "2016-06-01T21:01:33Z" }, { "body": "This is a bug IMO. I've seen this myself, and I imagine it would be very confusing to a user.\n", "created_at": "2016-06-01T21:14:14Z" }, { "body": "@jasontedor Now that custom plugin paths are gone, what need is there for reading elasticsearch.yml?\n", "created_at": "2016-06-01T21:15:56Z" }, { "body": "> Now that custom plugin paths are gone, what need is there for reading elasticsearch.yml?\n\nThere isn't a need to read elasticsearch.yml, but there is a need to know where `path.conf` is for plugins that do package a plugin config file.\n", "created_at": "2016-06-02T02:47:38Z" }, { "body": "LGTM.\n", "created_at": "2016-09-14T01:35:38Z" } ], "number": 18689, "title": "Unxepected error when running command elasticsearch-plugin" }
{ "body": "Currently we always pass -E to the the plugin cli with the conf dir, but\nthis causes a very confusing error message when not giving a specific\ncommand to the plugin cli. This change makes path.conf pass just like\npath.home. These are special settings, so passing via sysprops is the\nright thing to do (it is all about how we pass between shell and java\ncli).\n\ncloses #18689\n", "number": 18870, "review_comments": [ { "body": "Unfortunately, this is not going to be quoting properly.\n", "created_at": "2016-06-30T19:02:20Z" } ], "title": "Use sysprop like with es.path.home to pass conf dir" }
{ "commits": [ { "message": "Plugins: Use sysprop like with es.path.home to pass conf dir\n\nCurrently we always pass -E to the the plugin cli with the conf dir, but\nthis causes a very confusing error message when not giving a specific\ncommand to the plugin cli. This change makes path.conf pass just like\npath.home. These are special settings, so passing via sysprops is the\nright thing to do (it is all about how we pass between shell and java\ncli).\n\ncloses #18689" }, { "message": "Merge branch 'master' into plugin_missing_command" } ], "files": [ { "diff": "@@ -82,9 +82,10 @@ HOSTNAME=`hostname | cut -d. -f1`\n export HOSTNAME\n \n declare -a args=(\"$@\")\n+path_props=(-Des.path.home=\"$ES_HOME\")\n \n if [ -e \"$CONF_DIR\" ]; then\n- args=(\"${args[@]}\" -Edefault.path.conf=\"$CONF_DIR\")\n+ path_props=(\"${path_props[@]}\" -Des.path.conf=\"$CONF_DIR\")\n fi\n \n-exec \"$JAVA\" $ES_JAVA_OPTS -Delasticsearch -Des.path.home=\"$ES_HOME\" -cp \"$ES_HOME/lib/*\" org.elasticsearch.plugins.PluginCli \"${args[@]}\"\n+exec \"$JAVA\" $ES_JAVA_OPTS -Delasticsearch \"${path_props[@]}\" -cp \"$ES_HOME/lib/*\" org.elasticsearch.plugins.PluginCli \"${args[@]}\"", "filename": "distribution/src/main/resources/bin/elasticsearch-plugin", "status": "modified" }, { "diff": "@@ -17,9 +17,14 @@ for %%I in (\"%SCRIPT_DIR%..\") do set ES_HOME=%%~dpfI\n \n TITLE Elasticsearch Plugin Manager ${project.version}\n \n+SET path_props=-Des.path.home=\"%ES_HOME%\"\n+IF DEFINED CONF_DIR (\n+ SET path_props=!path_props! -Des.path.conf=\"%CONF_DIR%\"\n+)\n+\n SET args=%*\n SET HOSTNAME=%COMPUTERNAME%\n \n-\"%JAVA%\" %ES_JAVA_OPTS% -Des.path.home=\"%ES_HOME%\" -cp \"%ES_HOME%/lib/*;\" \"org.elasticsearch.plugins.PluginCli\" !args!\n+\"%JAVA%\" %ES_JAVA_OPTS% !path_props! -cp \"%ES_HOME%/lib/*;\" \"org.elasticsearch.plugins.PluginCli\" !args!\n \n ENDLOCAL", "filename": "distribution/src/main/resources/bin/elasticsearch-plugin.bat", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: 5.0.0-alpha3\n\n**OS version**: Windows 10\n\n**Description of the problem including expected versus actual behavior**:\n\n`flat_settings` is not respected on nodes info.\n\n**Steps to reproduce**:\n1. call `/_nodes/_all/settings?flat_settings=true&timeout=2s&pretty=true`\n2. observe the settings response is not flat e.g.\n\n``` json\n{\n \"_nodes\" : {\n \"total\" : 1,\n \"successful\" : 1,\n \"failed\" : 0\n },\n \"cluster_name\" : \"sniffroledetection-cluster-b857db\",\n \"nodes\" : {\n \"0f3jW5LyR4K2lYQl8arDAw\" : {\n \"name\" : \"sniffroledetection-node-b857db\",\n \"transport_address\" : \"127.0.0.1:9300\",\n \"host\" : \"127.0.0.1\",\n \"ip\" : \"127.0.0.1\",\n \"version\" : \"5.0.0-alpha3\",\n \"build_hash\" : \"cad959b\",\n \"http_address\" : \"127.0.0.1:9200\",\n \"roles\" : [ \"master\", \"ingest\" ],\n \"attributes\" : {\n \"testingcluster\" : \"true\"\n },\n \"settings\" : {\n \"cluster\" : {\n \"name\" : \"sniffroledetection-cluster-b857db\"\n },\n \"node\" : {\n \"name\" : \"sniffroledetection-node-b857db\",\n \"attr\" : {\n \"testingcluster\" : \"true\"\n },\n \"data\" : \"false\",\n \"master\" : \"true\"\n },\n \"path\" : {\n \"logs\" : \"C:/Users/russ/AppData/Roaming/NEST/5.0.0-alpha3/elasticsearch-5.0.0-alpha3/logs\",\n \"home\" : \"C:\\\\Users\\\\russ\\\\AppData\\\\Roaming\\\\NEST\\\\5.0.0-alpha3\\\\elasticsearch-5.0.0-alpha3\",\n \"repo\" : \"C:\\\\Users\\\\russ\\\\AppData\\\\Roaming\\\\NEST\\\\5.0.0-alpha3\\\\repositories\"\n },\n \"client\" : {\n \"type\" : \"node\"\n },\n \"threadpool\" : {\n \"watcher\" : {\n \"queue_size\" : \"1000\",\n \"size\" : \"40\",\n \"type\" : \"fixed\"\n }\n },\n \"script\" : {\n \"inline\" : \"true\",\n \"stored\" : \"true\"\n },\n \"xpack\" : {\n \"security\" : {\n \"enabled\" : \"false\"\n }\n }\n }\n }\n }\n}\n```\n\n`flat_settings` is honored in 5.0.0-alpha1 and 5.0.0-alpha2 so it appears to be a regression in 5.0.0-alpha3.\n", "comments": [], "number": 18794, "title": "flat_settings ignored on nodes info in 5.0.0-alpha3" }
{ "body": "Closes #18794\n", "number": 18860, "review_comments": [ { "body": "Nice\n", "created_at": "2016-06-16T13:56:07Z" } ], "title": "Get XContent params from request in Nodes rest actions" }
{ "commits": [ { "message": "Get XContent params from request in Nodes rest actions" }, { "message": "Adding test for nodes info rest api" } ], "files": [ { "diff": "@@ -275,7 +275,7 @@ public NodesResponseRestListener(RestChannel channel) {\n \n @Override\n public RestResponse buildResponse(NodesResponse response, XContentBuilder builder) throws Exception {\n- return RestActions.nodesResponse(builder, ToXContent.EMPTY_PARAMS, response);\n+ return RestActions.nodesResponse(builder, channel.request(), response);\n }\n \n }", "filename": "core/src/main/java/org/elasticsearch/rest/action/support/RestActions.java", "status": "modified" }, { "diff": "@@ -0,0 +1,19 @@\n+---\n+\"node_info test flat_settings\":\n+ - do:\n+ cluster.state: {}\n+\n+ - set: { master_node: master }\n+\n+ - do:\n+ nodes.info:\n+ metric: [ settings ]\n+\n+ - match : { nodes.$master.settings.client.type: node }\n+\n+ - do:\n+ nodes.info:\n+ metric: [ settings ]\n+ flat_settings: true\n+\n+ - match : { nodes.$master.settings.client\\.type: node }", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/30_settings.yaml", "status": "added" } ] }
{ "body": "Hi,\n\nOur process involve a bulk loading of one index (index_b) while a second is used in production (index_a), at the end of the bulk loading we swap the alias (index_alias) to index_b and close the first one, this process happen three times a day. Here's a more detailled list of actions we perform on the index\n- (index used in production => index_a)\n- open index_b\n- disable refresh_interval\n- bulk load in index_b\n- send a refresh request\n- optimize\n- wait for each node to have merges: 0 for index_b\n- swap alias\n- close index_a\n\nDuring the bulk loading of index_b (or index_a since the process happen 3 times a day) two of the three nodes will be stuck in relocating loop of one or multiple shards, not always the same on a different node with the following log :\n\n```\n[2015-07-02 08:57:40,322][WARN ][indices.cluster ] [elasticsearch-nodes2.localdomain] [[index_a][1]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [index_a][1] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [index_a][1], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:576)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:504)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 
9 more\n[2015-07-02 08:57:40,339][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 73587\n[2015-07-02 08:57:45,339][WARN ][indices.cluster ] [elasticsearch-nodes2.localdomain] [[index_a][0]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [index_a][0] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [index_a][0], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:576)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:504)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 
9 more\n[2015-07-02 08:57:50,340][WARN ][indices.cluster ] [elasticsearch-nodes2.localdomain] [[index_a][1]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [index_a][1] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [index_a][1], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:576)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:504)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 9 more\n[2015-07-02 08:57:50,341][INFO ][cluster.routing.allocation.decider] [elasticsearch-nodes2.localdomain] updating [cluster.routing.allocation.enable] from [ALL] to [NONE]\n[2015-07-02 08:57:50,345][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 73588\n```\n\nThe nodes will (until restart) exchange cluster state version indefinitely \n\n```\nreceived cluster state version 76797\nreceived cluster state version 76798\nreceived cluster state version 76799\nreceived cluster state version 76800\nreceived cluster state version 76801\nreceived cluster state version 76802 \n```\n\nAt this stage the cluster is green, but when the second time our process run, it'll trigger a yellow and sometimes a red cluster\n\nThings we've tried:\n- upgrading (1.5.2, 1.5.3, 1.6.0)\n- rolling restart of each e.s process\n- complete rolling replacement of each of the 3 nodes (including data on disk)\n- adding more nodes (from 3 to 5)\n\nThe only way to fix this is to restart one of the nodes involved in the relocation process\n\n```\n[2015-07-02 08:57:59,708][INFO ][node ] [elasticsearch-nodes2.localdomain] stopping ...\n[2015-07-02 08:57:59,725][DEBUG][discovery.zen.fd ] [elasticsearch-nodes2.localdomain] [master] stopping fault detection against master [[elasticsearch-nodes3.localdomain][o_9lrwRfTn6bbIztX9OCvA][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}], reason [zen disco stop]\n[2015-07-02 08:57:59,792][INFO ][node ] [elasticsearch-nodes2.localdomain] stopped\n[2015-07-02 08:57:59,792][INFO ][node ] [elasticsearch-nodes2.localdomain] closing ...\n[2015-07-02 08:57:59,799][DEBUG][com.amazonaws.http.IdleConnectionReaper] Reaper thread:\njava.lang.InterruptedException: sleep 
interrupted\n at java.lang.Thread.sleep(Native Method)\n at com.amazonaws.http.IdleConnectionReaper.run(IdleConnectionReaper.java:112)\n[2015-07-02 08:57:59,800][DEBUG][com.amazonaws.http.IdleConnectionReaper] Shutting down reaper thread.\n[2015-07-02 08:58:09,806][WARN ][cluster.action.index ] [elasticsearch-nodes2.localdomain] [shopper_chargement_prod_a] failed to lock all shards for index - timed out after 30 seconds\n[2015-07-02 08:58:09,814][INFO ][node ] [elasticsearch-nodes2.localdomain] closed\n\n\n\n[2015-07-02 08:58:41,442][INFO ][node ] [elasticsearch-nodes2.localdomain] version[1.6.0], pid[45115], build[cdd3ac4/2015-06-09T13:36:34Z]\n[2015-07-02 08:58:41,443][INFO ][node ] [elasticsearch-nodes2.localdomain] initializing ...\n[2015-07-02 08:58:41,459][INFO ][plugins ] [elasticsearch-nodes2.localdomain] loaded [cloud-aws], sites [HQ, whatson, kopf]\n[2015-07-02 08:58:41,499][INFO ][env ] [elasticsearch-nodes2.localdomain] using [1] data paths, mounts [[/srv/data (/dev/mapper/lvm--raid--0-lvm0)]], net usable_space [471.4gb], net total_space [499.6gb], types [xfs]\n[2015-07-02 08:58:44,350][INFO ][node ] [elasticsearch-nodes2.localdomain] initialized\n[2015-07-02 08:58:44,350][INFO ][node ] [elasticsearch-nodes2.localdomain] starting ...\n[2015-07-02 08:58:44,527][INFO ][transport ] [elasticsearch-nodes2.localdomain] bound_address {inet[/0.0.0.0:9300]}, publish_address {inet[/10.210.14.19:9300]}\n[2015-07-02 08:58:44,546][INFO ][discovery ] [elasticsearch-nodes2.localdomain] es-cluster/p6IyMeFHRCey0kRffbHgDw\n[2015-07-02 08:58:48,598][INFO ][cluster.service ] [elasticsearch-nodes2.localdomain] detected_master [elasticsearch-nodes3.localdomain][o_9lrwRfTn6bbIztX9OCvA][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}, added {[elasticsearch-nodes3.localdomain][o_9lrwRfTn6bbIztX9OCvA][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1},[elasticsearch-nodes1.localdomain][DsZ08lNfSF6EwvAMSoehng][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1},}, reason: zen-disco-receive(from master [[elasticsearch-nodes3.localdomain][o_9lrwRfTn6bbIztX9OCvA][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}])\n[2015-07-02 08:58:48,606][INFO ][cluster.routing.allocation.decider] [elasticsearch-nodes2.localdomain] updating [cluster.routing.allocation.enable] from [ALL] to [NONE]\n[2015-07-02 08:58:48,606][INFO ][indices.recovery ] [elasticsearch-nodes2.localdomain] updating [indices.recovery.translog_size] from [512kb] to [2mb]\n[2015-07-02 08:58:48,649][INFO ][http ] [elasticsearch-nodes2.localdomain] bound_address {inet[/0.0.0.0:9200]}, publish_address {inet[/10.210.14.19:9200]}\n[2015-07-02 08:58:48,649][INFO ][node ] [elasticsearch-nodes2.localdomain] started\n[2015-07-02 08:59:07,416][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 73591\n[2015-07-02 08:59:07,417][INFO ][cluster.routing.allocation.decider] [elasticsearch-nodes2.localdomain] updating [cluster.routing.allocation.enable] from [NONE] to [ALL]\n[2015-07-02 08:59:07,424][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 73592\n[2015-07-02 08:59:07,842][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][0] started recovery from 
[elasticsearch-nodes3.localdomain][o_9lrwRfTn6bbIztX9OCvA][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}, id [1]\n[2015-07-02 08:59:07,844][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] collecting local files for [index_a][0] [1]\n[2015-07-02 08:59:07,845][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][0] starting recovery from [elasticsearch-nodes3.localdomain][o_9lrwRfTn6bbIztX9OCvA][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-02 08:59:07,858][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] started recovery from [elasticsearch-nodes3.localdomain][o_9lrwRfTn6bbIztX9OCvA][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}, id [2]\n[2015-07-02 08:59:07,859][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] collecting local files for [index_a][1] [2]\n[2015-07-02 08:59:07,859][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] starting recovery from [elasticsearch-nodes3.localdomain][o_9lrwRfTn6bbIztX9OCvA][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-02 08:59:07,866][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][0] Got exception on recovery\norg.elasticsearch.transport.RemoteTransportException: [elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]][internal:index/shard/recovery/start_recovery]\nCaused by: org.elasticsearch.indices.recovery.DelayRecoveryException: source node does not have the shard listed in its state as allocated on the node\n at org.elasticsearch.indices.recovery.RecoverySource.recover(RecoverySource.java:108)\n at org.elasticsearch.indices.recovery.RecoverySource.access$200(RecoverySource.java:49)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:146)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:132)\n at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.doRun(MessageChannelHandler.java:279)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:36)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\n[2015-07-02 08:59:07,866][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] Got exception on recovery\norg.elasticsearch.transport.RemoteTransportException: [elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]][internal:index/shard/recovery/start_recovery]\nCaused by: org.elasticsearch.indices.recovery.DelayRecoveryException: source node does not have the shard listed in its state as allocated on the node\n at org.elasticsearch.indices.recovery.RecoverySource.recover(RecoverySource.java:108)\n at org.elasticsearch.indices.recovery.RecoverySource.access$200(RecoverySource.java:49)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:146)\n at 
org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:132)\n at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.doRun(MessageChannelHandler.java:279)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:36)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\n[2015-07-02 08:59:07,868][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] will retrying recovery with id [1] in [500ms] (reason [source node does not have the shard listed in its state as allocated on the node])\n[2015-07-02 08:59:07,868][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] will retrying recovery with id [2] in [500ms] (reason [source node does not have the shard listed in its state as allocated on the node])\n[2015-07-02 08:59:08,369][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] collecting local files for [index_a][1] [2]\n[2015-07-02 08:59:08,369][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] collecting local files for [index_a][0] [1]\n[2015-07-02 08:59:08,370][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][0] starting recovery from [elasticsearch-nodes3.localdomain][o_9lrwRfTn6bbIztX9OCvA][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-02 08:59:08,370][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] starting recovery from [elasticsearch-nodes3.localdomain][o_9lrwRfTn6bbIztX9OCvA][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n```\n\nSetup:\nAWS 3 nodes, eu-west1 a,b,c zones, nginx proxy on each of the three nodes, one ELB with SSL offloading and round robin on the three nginx\ndata stored on EBS volumes\n\nES config:\n\n```\ncluster.name: es-cluster\nnode.name: elasticsearch-nodes2.localdomain\nnode.max_local_storage_nodes: 1\n\nindex.mapper.dynamic: true\naction.auto_create_index: true\naction.disable_delete_all_indices: true\n\npath.conf: /usr/local/etc/elasticsearch\npath.data: /srv/data/elasticsearch/data\npath.logs: /srv/data/elasticsearch/logs\n\nbootstrap.mlockall: true\n\nhttp.port: 9200\n\ngateway.expected_nodes: 1\ndiscovery.type: ec2\n\ndiscovery.zen.minimum_master_nodes: 2\ndiscovery.zen.ping.multicast.enabled: false\n\ncloud.node.auto_attributes: true\ncloud.aws.region: eu-west-1\ndiscovery.ec2.tag: custom-es-cluster-tag\n\naction.disable_delete_all_indices: true\ndiscovery.zen.fd.ping_timeout: 15s\ngateway.recover_after_nodes: 2\ngateway.recover_after_time: 5m\nhttp.compression: true\nhttp.cors.allow-origin: '*'\nhttp.cors.enabled: true\nindices.store.throttle.max_bytes_per_sec: 100mb\nthreadpool.bulk.queue_size: 3000\ntransport.tcp.compress: true\n```\n\nES Index settings:\n\n```\n{\n \"index_b\":{\n \"settings\":{\n \"index\":{\n \"creation_date\":\"1435765694828\",\n \"uuid\":\"JjqbLn6CS1q0nwaw5HhIpA\",\n \"analysis\":{\n \"filter\":{\n \"my_word_delimiter\":{\n \"type\":\"word_delimiter\",\n \"split_on_numerics\":\"true\"\n },\n \"french_stemmer\":{\n \"type\":\"stemmer\",\n \"name\":\"light_french\"\n },\n \"limit_token\":{\n \"type\":\"limit\",\n \"max_token_count\":\"7\"\n },\n \"french_stopwords\":{\n \"type\":\"stop\",\n \"stopwords\":\"_french_\"\n 
}\n },\n \"analyzer\":{\n \"word_delimiter_stopwfr\":{\n \"filter\":[\n \"my_word_delimiter\",\n \"asciifolding\",\n \"lowercase\",\n \"french_stopwords\"\n ],\n \"tokenizer\":\"digitletter\"\n },\n \"word_delimiter\":{\n \"filter\":[\n \"my_word_delimiter\",\n \"lowercase\",\n \"asciifolding\"\n ],\n \"tokenizer\":\"digitletter\"\n },\n \"word_delimiter_stopwfr_stemfr\":{\n \"filter\":[\n \"my_word_delimiter\",\n \"asciifolding\",\n \"lowercase\",\n \"french_stopwords\",\n \"french_stemmer\"\n ],\n \"tokenizer\":\"digitletter\"\n },\n \"word_delimiter_limit\":{\n \"filter\":[\n \"my_word_delimiter\",\n \"lowercase\",\n \"limit_token\",\n \"asciifolding\"\n ],\n \"tokenizer\":\"digitletter\"\n }\n },\n \"tokenizer\":{\n \"digitletter\":{\n \"pattern\":\"[^\\\\p{Ll}\\\\p{Lu}0-9]+\",\n \"type\":\"pattern\"\n }\n }\n },\n \"number_of_replicas\":\"1\",\n \"number_of_shards\":\"6\",\n \"refresh_interval\":\"-1\",\n \"version\":{\n \"created\":\"1060099\"\n }\n }\n }\n }\n}\n```\n", "comments": [ { "body": "Hi @womwombat \n\nMy first guess would be heavy I/O caused by the optimize process. You say you're using EBS volumes, without provisioned IOPS? EBS is pretty slow unless you have provisioned IOPS, and you've set the merge throttling to a too high value for EBS.\n\nSo I think the optimize uses all the I/O which stops Elasticsearch from obtaining the file system lock quickly enough. Try reducing the throttling.\n\nas a side note, you have `gateway.recover_after_nodes` set to 2, but `expected_nodes` set to 1. This doesn't make sense. It waits for the expected nodes and only falls back to recover_after_nodes if it is still waiting when the recover_after_time period has expired. You should set expected nodes to 3 in your case (or however many nodes you plan on having)\n", "created_at": "2015-07-05T16:52:28Z" }, { "body": "Hi,\n\nAs a side note, this issue arise suddently, we've been running this cluster for weeks (~ 8 weeks at least), but following your wise advise we have set the indices.store.throttle.max_bytes_per_sec: 5mb.\n\nWe have fixed what was a typo in the expected_nodes / gateway_recover_after_nodes too.\n\nHowever the issue still goes on :/\n\n```\n[2015-07-08 08:30:49,271][WARN ][indices.cluster ] [elasticsearch-nodes2.localdomain] [[index_a][2]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [index_a][2] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: 
org.apache.lucene.store.LockObtainFailedException: Can't lock shard [index_a][2], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:576)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:504)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 9 more\n[2015-07-08 08:30:49,317][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89042\n[2015-07-08 08:30:54,359][WARN ][indices.cluster ] [elasticsearch-nodes2.localdomain] [[index_a][0]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [index_a][0] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [index_a][0], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:576)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:504)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 
9 more\n[2015-07-08 08:30:59,360][WARN ][indices.cluster ] [elasticsearch-nodes2.localdomain] [[index_a][5]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [index_a][5] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [index_a][5], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:576)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:504)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 9 more\n[2015-07-08 08:30:59,372][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89043\n[2015-07-08 08:31:04,413][WARN ][indices.cluster ] [elasticsearch-nodes2.localdomain] [[index_a][4]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [index_a][4] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [index_a][4], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:576)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:504)\n at 
org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 9 more\n[2015-07-08 08:31:09,414][WARN ][indices.cluster ] [elasticsearch-nodes2.localdomain] [[index_a][3]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [index_a][3] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [index_a][3], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:576)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:504)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 9 more\n[2015-07-08 08:31:14,415][WARN ][indices.cluster ] [elasticsearch-nodes2.localdomain] [[index_a][1]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [index_a][1] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [index_a][1], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:576)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:504)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 
9 more\n[2015-07-08 08:31:14,424][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89044\n[2015-07-08 08:31:14,466][INFO ][cluster.routing.allocation.decider] [elasticsearch-nodes2.localdomain] updating [cluster.routing.allocation.enable] from [ALL] to [NONE]\n[2015-07-08 08:31:14,474][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89045\n[2015-07-08 08:31:38,228][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:31:38,228][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:31:38,228][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:31:38,228][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:32:05,411][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89046\n[2015-07-08 08:32:05,412][INFO ][cluster.routing.allocation.decider] [elasticsearch-nodes2.localdomain] updating [cluster.routing.allocation.enable] from [NONE] to [ALL]\n[2015-07-08 08:32:05,418][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89047\n[2015-07-08 08:32:10,460][WARN ][indices.cluster ] [elasticsearch-nodes2.localdomain] [[index_a][1]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [index_a][1] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [index_a][1], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:576)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:504)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 
9 more\n[2015-07-08 08:32:15,461][WARN ][indices.cluster ] [elasticsearch-nodes2.localdomain] [[index_a][5]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [index_a][5] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [index_a][5], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:576)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:504)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 9 more\n[2015-07-08 08:32:20,462][WARN ][indices.cluster ] [elasticsearch-nodes2.localdomain] [[index_a][2]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [index_a][2] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [index_a][2], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:576)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:504)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 
9 more\n[2015-07-08 08:32:20,475][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89048\n[2015-07-08 08:32:25,516][WARN ][indices.cluster ] [elasticsearch-nodes2.localdomain] [[index_a][4]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [index_a][4] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [index_a][4], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:576)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:504)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 
9 more\n[2015-07-08 08:32:30,517][WARN ][indices.cluster ] [elasticsearch-nodes2.localdomain] [[index_a][0]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [index_a][0] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [index_a][0], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:576)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:504)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 9 more\n[2015-07-08 08:32:35,517][WARN ][indices.cluster ] [elasticsearch-nodes2.localdomain] [[index_a][3]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [index_a][3] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [index_a][3], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:576)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:504)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 
9 more\n[2015-07-08 08:32:35,532][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89049\n[2015-07-08 08:32:38,118][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:32:38,118][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:32:38,118][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:32:38,118][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:32:40,573][WARN ][indices.cluster ] [elasticsearch-nodes2.localdomain] [[index_a][1]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [index_a][1] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [index_a][1], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:576)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:504)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 
9 more\n[2015-07-08 08:32:45,574][WARN ][indices.cluster ] [elasticsearch-nodes2.localdomain] [[index_a][5]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [index_a][5] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [index_a][5], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:576)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:504)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 9 more\n[2015-07-08 08:32:45,584][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89050\n[2015-07-08 08:32:45,626][INFO ][cluster.routing.allocation.decider] [elasticsearch-nodes2.localdomain] updating [cluster.routing.allocation.enable] from [ALL] to [NONE]\n[2015-07-08 08:32:45,662][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89051\n[2015-07-08 08:33:39,685][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:33:39,686][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:33:39,686][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:33:39,686][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:34:41,144][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:34:41,145][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:34:41,145][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. 
timeout setting [1m], time since start [1m]\n[2015-07-08 08:34:41,145][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:35:37,612][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:35:37,612][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:35:37,612][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:35:37,612][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:36:41,024][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:36:41,025][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:36:41,025][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:36:41,025][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:37:14,212][INFO ][node ] [elasticsearch-nodes2.localdomain] stopping ...\n[2015-07-08 08:37:14,254][WARN ][netty.channel.DefaultChannelPipeline] An exception was thrown by an exception handler.\njava.util.concurrent.RejectedExecutionException: Worker has already been shutdown\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioSelector.registerTask(AbstractNioSelector.java:120)\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioWorker.executeInIoThread(AbstractNioWorker.java:72)\n at org.elasticsearch.common.netty.channel.socket.nio.NioWorker.executeInIoThread(NioWorker.java:36)\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioWorker.executeInIoThread(AbstractNioWorker.java:56)\n at org.elasticsearch.common.netty.channel.socket.nio.NioWorker.executeInIoThread(NioWorker.java:36)\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioChannelSink.execute(AbstractNioChannelSink.java:34)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.execute(DefaultChannelPipeline.java:636)\n at org.elasticsearch.common.netty.channel.Channels.fireExceptionCaughtLater(Channels.java:496)\n at org.elasticsearch.common.netty.channel.AbstractChannelSink.exceptionCaught(AbstractChannelSink.java:46)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.notifyHandlerException(DefaultChannelPipeline.java:658)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendDownstream(DefaultChannelPipeline.java:781)\n at org.elasticsearch.common.netty.channel.Channels.write(Channels.java:725)\n at org.elasticsearch.common.netty.handler.codec.oneone.OneToOneEncoder.doEncode(OneToOneEncoder.java:71)\n at org.elasticsearch.common.netty.handler.codec.oneone.OneToOneEncoder.handleDownstream(OneToOneEncoder.java:59)\n at 
org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:591)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendDownstream(DefaultChannelPipeline.java:784)\n at org.elasticsearch.common.netty.handler.codec.http.HttpContentEncoder.writeRequested(HttpContentEncoder.java:138)\n at org.elasticsearch.common.netty.channel.SimpleChannelHandler.handleDownstream(SimpleChannelHandler.java:254)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:591)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendDownstream(DefaultChannelPipeline.java:784)\n at org.elasticsearch.http.netty.pipelining.HttpPipeliningHandler.handleDownstream(HttpPipeliningHandler.java:87)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:591)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:582)\n at org.elasticsearch.http.netty.NettyHttpChannel.sendResponse(NettyHttpChannel.java:195)\n at org.elasticsearch.rest.action.support.RestResponseListener.processResponse(RestResponseListener.java:43)\n at org.elasticsearch.rest.action.support.RestActionListener.onResponse(RestActionListener.java:49)\n at org.elasticsearch.action.bulk.TransportBulkAction$2.finishHim(TransportBulkAction.java:360)\n at org.elasticsearch.action.bulk.TransportBulkAction$2.onFailure(TransportBulkAction.java:355)\n at org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$PrimaryPhase.finishAsFailed(TransportShardReplicationOperationAction.java:536)\n at org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$PrimaryPhase$3.onClusterServiceClose(TransportShardReplicationOperationAction.java:509)\n at org.elasticsearch.cluster.ClusterStateObserver$ObserverClusterStateListener.onClose(ClusterStateObserver.java:217)\n at org.elasticsearch.cluster.service.InternalClusterService.doStop(InternalClusterService.java:174)\n at org.elasticsearch.common.component.AbstractLifecycleComponent.stop(AbstractLifecycleComponent.java:105)\n at org.elasticsearch.node.internal.InternalNode.stop(InternalNode.java:307)\n at org.elasticsearch.node.internal.InternalNode.close(InternalNode.java:331)\n at org.elasticsearch.bootstrap.Bootstrap$1.run(Bootstrap.java:82)\n[2015-07-08 08:37:14,256][DEBUG][discovery.zen.fd ] [elasticsearch-nodes2.localdomain] [master] stopping fault detection against master [[elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_\nstorage_nodes=1}], reason [zen disco stop]\n[2015-07-08 08:37:14,296][WARN ][netty.channel.DefaultChannelPipeline] An exception was thrown by an exception handler.\njava.util.concurrent.RejectedExecutionException: Worker has already been shutdown\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioSelector.registerTask(AbstractNioSelector.java:120)\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioWorker.executeInIoThread(AbstractNioWorker.java:72)\n at org.elasticsearch.common.netty.channel.socket.nio.NioWorker.executeInIoThread(NioWorker.java:36)\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioWorker.executeInIoThread(AbstractNioWorker.java:56)\n at 
org.elasticsearch.common.netty.channel.socket.nio.NioWorker.executeInIoThread(NioWorker.java:36)\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioChannelSink.execute(AbstractNioChannelSink.java:34)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.execute(DefaultChannelPipeline.java:636)\n at org.elasticsearch.common.netty.channel.Channels.fireExceptionCaughtLater(Channels.java:496)\n at org.elasticsearch.common.netty.channel.AbstractChannelSink.exceptionCaught(AbstractChannelSink.java:46)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.notifyHandlerException(DefaultChannelPipeline.java:658)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendDownstream(DefaultChannelPipeline.java:781)\n at org.elasticsearch.common.netty.channel.Channels.write(Channels.java:725)\n at org.elasticsearch.common.netty.handler.codec.oneone.OneToOneEncoder.doEncode(OneToOneEncoder.java:71)\n at org.elasticsearch.common.netty.handler.codec.oneone.OneToOneEncoder.handleDownstream(OneToOneEncoder.java:59)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:591)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendDownstream(DefaultChannelPipeline.java:784)\n at org.elasticsearch.common.netty.handler.codec.http.HttpContentEncoder.writeRequested(HttpContentEncoder.java:138)\n at org.elasticsearch.common.netty.channel.SimpleChannelHandler.handleDownstream(SimpleChannelHandler.java:254)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:591)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendDownstream(DefaultChannelPipeline.java:784)\n at org.elasticsearch.http.netty.pipelining.HttpPipeliningHandler.handleDownstream(HttpPipeliningHandler.java:87)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:591)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:582)\n at org.elasticsearch.http.netty.NettyHttpChannel.sendResponse(NettyHttpChannel.java:195)\n at org.elasticsearch.rest.action.support.RestResponseListener.processResponse(RestResponseListener.java:43)\n at org.elasticsearch.rest.action.support.RestActionListener.onResponse(RestActionListener.java:49)\n at org.elasticsearch.action.bulk.TransportBulkAction$2.finishHim(TransportBulkAction.java:360)\n at org.elasticsearch.action.bulk.TransportBulkAction$2.onFailure(TransportBulkAction.java:355)\n at org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$PrimaryPhase.finishAsFailed(TransportShardReplicationOperationAction.java:536)\n at org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$PrimaryPhase$2.handleException(TransportShardReplicationOperationAction.java:481)\n at org.elasticsearch.transport.TransportService$2.run(TransportService.java:178)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\n[2015-07-08 08:37:14,370][INFO ][node ] [elasticsearch-nodes2.localdomain] stopped\n[2015-07-08 08:37:14,370][INFO ][node ] [elasticsearch-nodes2.localdomain] closing ...\n[2015-07-08 
08:37:14,378][DEBUG][com.amazonaws.http.IdleConnectionReaper] Reaper thread:\njava.lang.InterruptedException: sleep interrupted\n at java.lang.Thread.sleep(Native Method)\n at com.amazonaws.http.IdleConnectionReaper.run(IdleConnectionReaper.java:112)\n[2015-07-08 08:37:14,378][DEBUG][com.amazonaws.http.IdleConnectionReaper] Shutting down reaper thread.\n```\n\nAnd when we stop/start the node:\n\n```\n[2015-07-08 08:37:57,146][INFO ][node ] [elasticsearch-nodes2.localdomain] version[1.6.0], pid[33261], build[cdd3ac4/2015-06-09T13:36:34Z]\n[2015-07-08 08:37:57,146][INFO ][node ] [elasticsearch-nodes2.localdomain] initializing ...\n[2015-07-08 08:37:57,163][INFO ][plugins ] [elasticsearch-nodes2.localdomain] loaded [cloud-aws], sites [HQ, whatson, kopf]\n[2015-07-08 08:37:57,203][INFO ][env ] [elasticsearch-nodes2.localdomain] using [1] data paths, mounts [[/srv/data (/dev/mapper/lvm--raid--0-lvm0)]], net usable_space [475.9gb], net total_space [499.6gb], types [xfs]\n[2015-07-08 08:38:00,074][INFO ][node ] [elasticsearch-nodes2.localdomain] initialized\n[2015-07-08 08:38:00,074][INFO ][node ] [elasticsearch-nodes2.localdomain] starting ...\n[2015-07-08 08:38:00,254][INFO ][transport ] [elasticsearch-nodes2.localdomain] bound_address {inet[/0.0.0.0:9300]}, publish_address {inet[/10.210.14.19:9300]}\n[2015-07-08 08:38:00,273][INFO ][discovery ] [elasticsearch-nodes2.localdomain] cluster-es/t7ZN91B7Se2qT6NsRRau5g\n[2015-07-08 08:38:04,343][INFO ][cluster.service ] [elasticsearch-nodes2.localdomain] detected_master [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}, added {[elastics\nearch-nodes3.localdomain][Bg5OX3aoTK-pWNImAkf-vw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1},[elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_z\none=eu-west-1c, max_local_storage_nodes=1},}, reason: zen-disco-receive(from master [[elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}])\n[2015-07-08 08:38:04,350][INFO ][cluster.routing.allocation.decider] [elasticsearch-nodes2.localdomain] updating [cluster.routing.allocation.enable] from [ALL] to [NONE]\n[2015-07-08 08:38:04,350][INFO ][indices.recovery ] [elasticsearch-nodes2.localdomain] updating [indices.recovery.translog_size] from [512kb] to [2mb]\n[2015-07-08 08:38:04,351][INFO ][indices.store ] [elasticsearch-nodes2.localdomain] updating indices.store.throttle.max_bytes_per_sec from [5mb] to [100mb], note, type is [MERGE]\n[2015-07-08 08:38:04,394][INFO ][http ] [elasticsearch-nodes2.localdomain] bound_address {inet[/0.0.0.0:9200]}, publish_address {inet[/10.210.14.19:9200]}\n[2015-07-08 08:38:04,394][INFO ][node ] [elasticsearch-nodes2.localdomain] started\n[2015-07-08 08:39:14,425][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:39:14,425][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:39:14,426][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. 
timeout setting [1m], time since start [1m]\n[2015-07-08 08:39:14,426][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:39:35,655][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89054\n[2015-07-08 08:39:35,655][INFO ][cluster.service ] [elasticsearch-nodes2.localdomain] removed {[elasticsearch-nodes3.localdomain][Bg5OX3aoTK-pWNImAkf-vw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1},}, reason: zen-disco-rece\nive(from master [[elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}])\n[2015-07-08 08:39:35,745][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89055\n[2015-07-08 08:39:36,153][DEBUG][index.gateway ] [elasticsearch-nodes2.localdomain] [index_b][4] starting recovery from local ...\n[2015-07-08 08:39:36,155][TRACE][index.gateway.local ] [elasticsearch-nodes2.localdomain] [index_b][4] using existing shard data, translog id [1436278480889]\n[2015-07-08 08:39:36,158][TRACE][index.gateway.local ] [elasticsearch-nodes2.localdomain] [index_b][4] try recover from translog file translog-1436278480889 locations: [/srv/data/elasticsearch/data/cluster-es/nodes/0/indices/index_b/4/translog]\n[2015-07-08 08:39:36,159][TRACE][index.gateway.local ] [elasticsearch-nodes2.localdomain] [index_b][4] Translog file found in /srv/data/elasticsearch/data/cluster-es/nodes/0/indices/index_b/4/translog - renaming\n[2015-07-08 08:39:36,159][TRACE][index.gateway.local ] [elasticsearch-nodes2.localdomain] [index_b][4] Renamed translog from translog-1436278480889 to translog-1436278480889.recovering\n[2015-07-08 08:39:36,167][DEBUG][index.gateway ] [elasticsearch-nodes2.localdomain] [index_b][5] starting recovery from local ...\n[2015-07-08 08:39:36,170][TRACE][index.gateway.local ] [elasticsearch-nodes2.localdomain] [index_b][5] using existing shard data, translog id [1436278480893]\n[2015-07-08 08:39:36,171][TRACE][index.gateway.local ] [elasticsearch-nodes2.localdomain] [index_b][5] try recover from translog file translog-1436278480893 locations: [/srv/data/elasticsearch/data/cluster-es/nodes/0/indices/index_b/5/translog]\n[2015-07-08 08:39:36,171][TRACE][index.gateway.local ] [elasticsearch-nodes2.localdomain] [index_b][5] Translog file found in /srv/data/elasticsearch/data/cluster-es/nodes/0/indices/index_b/5/translog - renaming\n[2015-07-08 08:39:36,171][TRACE][index.gateway.local ] [elasticsearch-nodes2.localdomain] [index_b][5] Renamed translog from translog-1436278480893 to translog-1436278480893.recovering\n[2015-07-08 08:39:36,527][TRACE][index.gateway.local ] [elasticsearch-nodes2.localdomain] [index_b][5] recovering translog file: /srv/data/elasticsearch/data/cluster-es/nodes/0/indices/index_b/5/translog/translog-1436278480893.recovering length: 17\n[2015-07-08 08:39:36,527][TRACE][index.gateway.local ] [elasticsearch-nodes2.localdomain] [index_b][4] recovering translog file: /srv/data/elasticsearch/data/cluster-es/nodes/0/indices/index_b/4/translog/translog-1436278480889.recovering length: 17\n[2015-07-08 08:39:36,527][TRACE][index.gateway.local ] [elasticsearch-nodes2.localdomain] [index_b][4] ignoring translog EOF exception, the last operation was not properly 
written\njava.io.EOFException\n at org.elasticsearch.common.io.stream.InputStreamStreamInput.readByte(InputStreamStreamInput.java:43)\n at org.elasticsearch.common.io.stream.StreamInput.readInt(StreamInput.java:116)\n at org.elasticsearch.index.translog.ChecksummedTranslogStream.read(ChecksummedTranslogStream.java:59)\n at org.elasticsearch.index.gateway.local.LocalIndexShardGateway.recover(LocalIndexShardGateway.java:267)\n at org.elasticsearch.index.gateway.IndexShardGatewayService$1.run(IndexShardGatewayService.java:112)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\n[2015-07-08 08:39:36,527][TRACE][index.gateway.local ] [elasticsearch-nodes2.localdomain] [index_b][5] ignoring translog EOF exception, the last operation was not properly written\njava.io.EOFException\n at org.elasticsearch.common.io.stream.InputStreamStreamInput.readByte(InputStreamStreamInput.java:43)\n at org.elasticsearch.common.io.stream.StreamInput.readInt(StreamInput.java:116)\n at org.elasticsearch.index.translog.ChecksummedTranslogStream.read(ChecksummedTranslogStream.java:59)\n at org.elasticsearch.index.gateway.local.LocalIndexShardGateway.recover(LocalIndexShardGateway.java:267)\n at org.elasticsearch.index.gateway.IndexShardGatewayService$1.run(IndexShardGatewayService.java:112)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\n[2015-07-08 08:39:36,532][TRACE][index.gateway ] [elasticsearch-nodes2.localdomain] [index_b][4] recovery completed from local, took [379ms]\n index : files [13] with total_size [3gb], took[6ms]\n : recovered_files [0] with total_size [0b]\n : reusing_files [13] with total_size [3gb]\n start : took [368ms], check_index [0s]\n translog : number_of_operations [0], took [4ms]\n[2015-07-08 08:39:36,532][TRACE][index.gateway ] [elasticsearch-nodes2.localdomain] [index_b][5] recovery completed from local, took [364ms]\n index : files [13] with total_size [3.1gb], took[3ms]\n : recovered_files [0] with total_size [0b]\n : reusing_files [13] with total_size [3.1gb]\n start : took [356ms], check_index [0s]\n translog : number_of_operations [0], took [4ms]\n[2015-07-08 08:39:36,536][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89056\n[2015-07-08 08:39:59,521][DEBUG][com.amazonaws.internal.SdkSSLSocket] shutting down output of ec2.eu-west-1.amazonaws.com/178.236.7.129:443\n[2015-07-08 08:39:59,521][DEBUG][com.amazonaws.internal.SdkSSLSocket] closing ec2.eu-west-1.amazonaws.com/178.236.7.129:443\n[2015-07-08 08:40:24,346][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89057\n[2015-07-08 08:40:24,346][INFO ][cluster.service ] [elasticsearch-nodes2.localdomain] added {[elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1},}, reason: zen-disco-receiv\ne(from master [[elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}])\n[2015-07-08 08:40:35,973][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout 
notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:40:35,973][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:40:35,973][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:40:35,973][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:40:41,857][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:40:41,857][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:40:41,857][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:40:41,857][DEBUG][action.bulk ] [elasticsearch-nodes2.localdomain] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-07-08 08:40:49,191][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89058\n[2015-07-08 08:40:49,192][INFO ][cluster.routing.allocation.decider] [elasticsearch-nodes2.localdomain] updating [cluster.routing.allocation.enable] from [NONE] to [ALL]\n[2015-07-08 08:40:49,205][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89059\n[2015-07-08 08:40:49,305][DEBUG][index.gateway ] [elasticsearch-nodes2.localdomain] [index_a][1] starting recovery from local ...\n[2015-07-08 08:40:49,306][TRACE][index.gateway.local ] [elasticsearch-nodes2.localdomain] [index_a][1] no translog id set (indexShouldExist [false])\n[2015-07-08 08:40:49,318][TRACE][index.gateway ] [elasticsearch-nodes2.localdomain] [index_a][1] recovery completed from local, took [13ms]\n index : files [0] with total_size [0b], took[1ms]\n : recovered_files [0] with total_size [0b]\n : reusing_files [0] with total_size [0b]\n start : took [11ms], check_index [0s]\n translog : number_of_operations [0], took [0s]\n[2015-07-08 08:40:49,322][DEBUG][index.gateway ] [elasticsearch-nodes2.localdomain] [index_a][5] starting recovery from local ...\n[2015-07-08 08:40:49,324][TRACE][index.gateway.local ] [elasticsearch-nodes2.localdomain] [index_a][5] cleaning existing shard, shouldn't exists\n[2015-07-08 08:40:49,618][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89060\n[2015-07-08 08:40:49,640][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][3] started recovery from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loc\nal_storage_nodes=1}, id [1]\n[2015-07-08 08:40:49,641][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] collecting local files for [index_b][3] [1]\n[2015-07-08 08:40:49,645][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][3] starting recovery from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, 
max_lo\ncal_storage_nodes=1}\n[2015-07-08 08:40:49,657][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][3] Got exception on recovery\norg.elasticsearch.transport.RemoteTransportException: [elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]][internal:index/shard/recovery/start_recovery]\nCaused by: org.elasticsearch.indices.recovery.DelayRecoveryException: source node does not have the shard listed in its state as allocated on the node\n at org.elasticsearch.indices.recovery.RecoverySource.recover(RecoverySource.java:108)\n at org.elasticsearch.indices.recovery.RecoverySource.access$200(RecoverySource.java:49)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:146)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:132)\n at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.doRun(MessageChannelHandler.java:279)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:36)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\n[2015-07-08 08:40:49,657][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] will retrying recovery with id [1] in [500ms] (reason [source node does not have the shard listed in its state as allocated on the node])\n[2015-07-08 08:40:49,844][DEBUG][index.gateway.local ] [elasticsearch-nodes2.localdomain] [index_a][5] failed to list file details\njava.io.FileNotFoundException: segments_d\n at org.apache.lucene.store.FSDirectory.fileLength(FSDirectory.java:255)\n at org.apache.lucene.store.FileSwitchDirectory.fileLength(FileSwitchDirectory.java:147)\n at org.apache.lucene.store.FilterDirectory.fileLength(FilterDirectory.java:63)\n at org.apache.lucene.store.FilterDirectory.fileLength(FilterDirectory.java:63)\n at org.apache.lucene.store.FilterDirectory.fileLength(FilterDirectory.java:63)\n at org.elasticsearch.index.gateway.local.LocalIndexShardGateway.recover(LocalIndexShardGateway.java:171)\n at org.elasticsearch.index.gateway.IndexShardGatewayService$1.run(IndexShardGatewayService.java:112)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\n[2015-07-08 08:40:49,844][TRACE][index.gateway.local ] [elasticsearch-nodes2.localdomain] [index_a][5] no translog id set (indexShouldExist [false])\n[2015-07-08 08:40:49,848][TRACE][index.gateway ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery completed from local, took [526ms]\n index : files [0] with total_size [0b], took[522ms]\n : recovered_files [0] with total_size [0b]\n : reusing_files [0] with total_size [0b]\n start : took [3ms], check_index [0s]\n translog : number_of_operations [0], took [0s]\n[2015-07-08 08:40:49,855][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89061\n[2015-07-08 08:40:49,872][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][2] started recovery from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loc\nal_storage_nodes=1}, id [2]\n[2015-07-08 
08:40:49,872][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] collecting local files for [index_a][2] [2]\n[2015-07-08 08:40:49,876][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][2] starting recovery from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_lo\ncal_storage_nodes=1}\n[2015-07-08 08:40:49,885][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][2] Got exception on recovery\norg.elasticsearch.transport.RemoteTransportException: [elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]][internal:index/shard/recovery/start_recovery]\nCaused by: org.elasticsearch.indices.recovery.DelayRecoveryException: source node does not have the shard listed in its state as allocated on the node\n at org.elasticsearch.indices.recovery.RecoverySource.recover(RecoverySource.java:108)\n at org.elasticsearch.indices.recovery.RecoverySource.access$200(RecoverySource.java:49)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:146)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:132)\n at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.doRun(MessageChannelHandler.java:279)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:36)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\n[2015-07-08 08:40:49,885][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] will retrying recovery with id [2] in [500ms] (reason [source node does not have the shard listed in its state as allocated on the node])\n[2015-07-08 08:40:50,157][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] collecting local files for [index_b][3] [1]\n[2015-07-08 08:40:50,162][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][3] starting recovery from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_lo\ncal_storage_nodes=1}\n[2015-07-08 08:40:50,173][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89062\n[2015-07-08 08:40:50,318][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][4] starting recovery to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}, mark_as_relocated false\n[2015-07-08 08:40:50,326][INFO ][indices.recovery ] [elasticsearch-nodes2.localdomain] Recovery with sync ID 12837074 numDocs: 12837074 vs. 
true\n[2015-07-08 08:40:50,326][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][4] skipping [phase1] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1} - identical sync id [AU5pGODmRtbfpQ748m1P] found on both source and target\n[2015-07-08 08:40:50,327][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][4] recovery [phase1] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: took [3.7ms]\n[2015-07-08 08:40:50,328][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][4] recovery [phase2] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: start\n[2015-07-08 08:40:50,340][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89063\n[2015-07-08 08:40:50,386][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] collecting local files for [index_a][2] [2]\n[2015-07-08 08:40:50,389][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][2] starting recovery from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_lo\ncal_storage_nodes=1}\n[2015-07-08 08:40:50,795][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][2] Got exception on recovery\norg.elasticsearch.transport.RemoteTransportException: [elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]][internal:index/shard/recovery/start_recovery]\nCaused by: org.elasticsearch.index.engine.RecoveryEngineException: [index_a][2] Phase[1] Execution failed\n at org.elasticsearch.index.engine.InternalEngine.recover(InternalEngine.java:898)\n at org.elasticsearch.index.shard.IndexShard.recover(IndexShard.java:780)\n at org.elasticsearch.indices.recovery.RecoverySource.recover(RecoverySource.java:125)\n at org.elasticsearch.indices.recovery.RecoverySource.access$200(RecoverySource.java:49)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:146)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:132)\n at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.doRun(MessageChannelHandler.java:279)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:36)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.elasticsearch.indices.recovery.RecoverFilesRecoveryException: [index_a][2] Failed to transfer [0] files with total size of [0b]\n at org.elasticsearch.indices.recovery.RecoverySourceHandler.phase1(RecoverySourceHandler.java:431)\n at org.elasticsearch.index.engine.InternalEngine.recover(InternalEngine.java:893)\n ... 
10 more\nCaused by: java.lang.IllegalStateException: try to recover [index_a][2] from primary shard with sync id but number of docs differ: 12627628 (elasticsearch-nodes1.localdomain, primary) vs 0(elasticsearch-nodes2.localdomain)\n at org.elasticsearch.indices.recovery.RecoverySourceHandler.phase1(RecoverySourceHandler.java:177)\n ... 11 more\n[2015-07-08 08:40:50,796][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][2] failing recovery from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loc\nal_storage_nodes=1}, id [2]. Send shard failure: [true]\n[2015-07-08 08:40:50,799][WARN ][indices.cluster ] [elasticsearch-nodes2.localdomain] [[index_a][2]] marking and sending shard failed due to [failed recovery]\norg.elasticsearch.indices.recovery.RecoveryFailedException: [index_a][2]: Recovery failed from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1} into [elasticse\narch-nodes2.localdomain][t7ZN91B7Se2qT6NsRRau5g][elasticsearch-nodes2.localdomain][inet[/10.210.14.19:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n at org.elasticsearch.indices.recovery.RecoveryTarget.doRecovery(RecoveryTarget.java:280)\n at org.elasticsearch.indices.recovery.RecoveryTarget.access$700(RecoveryTarget.java:70)\n at org.elasticsearch.indices.recovery.RecoveryTarget$RecoveryRunner.doRun(RecoveryTarget.java:561)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:36)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.elasticsearch.transport.RemoteTransportException: [elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]][internal:index/shard/recovery/start_recovery]\nCaused by: org.elasticsearch.index.engine.RecoveryEngineException: [index_a][2] Phase[1] Execution failed\n at org.elasticsearch.index.engine.InternalEngine.recover(InternalEngine.java:898)\n at org.elasticsearch.index.shard.IndexShard.recover(IndexShard.java:780)\n at org.elasticsearch.indices.recovery.RecoverySource.recover(RecoverySource.java:125)\n at org.elasticsearch.indices.recovery.RecoverySource.access$200(RecoverySource.java:49)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:146)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:132)\n at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.doRun(MessageChannelHandler.java:279)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:36)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.elasticsearch.indices.recovery.RecoverFilesRecoveryException: [index_a][2] Failed to transfer [0] files with total size of [0b]\n at org.elasticsearch.indices.recovery.RecoverySourceHandler.phase1(RecoverySourceHandler.java:431)\n at org.elasticsearch.index.engine.InternalEngine.recover(InternalEngine.java:893)\n ... 
10 more\nCaused by: java.lang.IllegalStateException: try to recover [index_a][2] from primary shard with sync id but number of docs differ: 12627628 (elasticsearch-nodes1.localdomain, primary) vs 0(elasticsearch-nodes2.localdomain)\n at org.elasticsearch.indices.recovery.RecoverySourceHandler.phase1(RecoverySourceHandler.java:177)\n ... 11 more\n[2015-07-08 08:40:51,257][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][4] recovery [phase2] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: start took [929.1ms]\n[2015-07-08 08:40:51,257][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][4] recovery [phase2] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: updating current mapping to master\n[2015-07-08 08:40:51,261][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][4] recovery [phase2] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: sending transaction log operations\n[2015-07-08 08:40:51,262][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][4] no translog operations (id: [1436278480889]) to send to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_avai\nlability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:40:51,262][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][4] sending final batch of [0][0b] (total: [0], id: [1436278480889]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][in\net[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:40:51,262][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][4] recovery [phase2] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: took [239.7micros]\n[2015-07-08 08:40:51,262][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][4] recovery [phase3] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: sending transaction log operations\n[2015-07-08 08:40:51,262][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][4] no translog operations (id: [1436278480889]) to send to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_avai\nlability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:40:51,262][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][4] sending final batch of [0][0b] (total: [0], id: [1436278480889]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][in\net[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:40:51,264][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][4] recovery 
[phase3] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: took [1.5ms]\n[2015-07-08 08:40:55,355][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][3] marking recovery from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loc\nal_storage_nodes=1} as done, id [1]\n[2015-07-08 08:40:55,355][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][3] recovery completed from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_l\nocal_storage_nodes=1}, took[5.7s]\n phase1: recovered_files [0] with total_size of [0b], took [1ms], throttling_wait [0s]\n : reusing_files [0] with total_size of [0b]\n phase2: start took [252ms]\n : recovered [0] transaction log operations, took [0s]\n phase3: recovered [0] transaction log operations, took [2ms]\n[2015-07-08 08:40:55,356][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89064\n[2015-07-08 08:40:55,385][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89065\n[2015-07-08 08:40:55,400][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][2] started recovery from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loc\nal_storage_nodes=1}, id [3]\n[2015-07-08 08:40:55,401][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] collecting local files for [index_b][2] [3]\n[2015-07-08 08:40:55,403][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][2] starting recovery from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_lo\ncal_storage_nodes=1}\n[2015-07-08 08:40:55,404][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][5] starting recovery to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}, mark_as_relocated false\n[2015-07-08 08:40:55,406][INFO ][indices.recovery ] [elasticsearch-nodes2.localdomain] Recovery with sync ID 12940164 numDocs: 12940164 vs. 
true\n[2015-07-08 08:40:55,406][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][5] skipping [phase1] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1} - identical sync id [AU5pGM3poRUqHcJOE4ov] found on both source and target\n[2015-07-08 08:40:55,406][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][5] recovery [phase1] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: took [2ms]\n[2015-07-08 08:40:55,406][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][5] recovery [phase2] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: start\n[2015-07-08 08:40:55,407][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][2] Got exception on recovery\norg.elasticsearch.transport.RemoteTransportException: [elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]][internal:index/shard/recovery/start_recovery]\nCaused by: org.elasticsearch.indices.recovery.DelayRecoveryException: source node does not have the shard listed in its state as allocated on the node\n at org.elasticsearch.indices.recovery.RecoverySource.recover(RecoverySource.java:108)\n at org.elasticsearch.indices.recovery.RecoverySource.access$200(RecoverySource.java:49)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:146)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:132)\n at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.doRun(MessageChannelHandler.java:279)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:36)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\n[2015-07-08 08:40:55,407][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] will retrying recovery with id [3] in [500ms] (reason [source node does not have the shard listed in its state as allocated on the node])\n[2015-07-08 08:40:55,424][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89066\n[2015-07-08 08:40:55,438][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][3] started recovery from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loc\nal_storage_nodes=1}, id [4]\n[2015-07-08 08:40:55,438][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] collecting local files for [index_a][3] [4]\n[2015-07-08 08:40:55,440][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][3] starting recovery from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_lo\ncal_storage_nodes=1}\n[2015-07-08 08:40:55,443][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][3] Got exception on 
recovery\norg.elasticsearch.transport.RemoteTransportException: [elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]][internal:index/shard/recovery/start_recovery]\nCaused by: org.elasticsearch.indices.recovery.DelayRecoveryException: source node does not have the shard listed in its state as allocated on the node\n at org.elasticsearch.indices.recovery.RecoverySource.recover(RecoverySource.java:108)\n at org.elasticsearch.indices.recovery.RecoverySource.access$200(RecoverySource.java:49)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:146)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:132)\n at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.doRun(MessageChannelHandler.java:279)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:36)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\n[2015-07-08 08:40:55,443][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] will retrying recovery with id [4] in [500ms] (reason [source node does not have the shard listed in its state as allocated on the node])\n[2015-07-08 08:40:55,613][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][5] recovery [phase2] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: start took [207.2ms]\n[2015-07-08 08:40:55,614][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][5] recovery [phase2] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: updating current mapping to master\n[2015-07-08 08:40:55,617][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][5] recovery [phase2] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: sending transaction log operations\n[2015-07-08 08:40:55,617][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][5] no translog operations (id: [1436278480893]) to send to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_avai\nlability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:40:55,617][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][5] sending final batch of [0][0b] (total: [0], id: [1436278480893]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][in\net[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:40:55,617][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][5] recovery [phase2] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: took [164.1micros]\n[2015-07-08 08:40:55,617][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][5] 
recovery [phase3] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: sending transaction log operations\n[2015-07-08 08:40:55,617][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][5] no translog operations (id: [1436278480893]) to send to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_avai\nlability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:40:55,617][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][5] sending final batch of [0][0b] (total: [0], id: [1436278480893]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][in\net[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:40:55,618][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][5] recovery [phase3] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: took [936micros]\n[2015-07-08 08:40:55,907][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] collecting local files for [index_b][2] [3]\n[2015-07-08 08:40:55,910][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][2] starting recovery from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_lo\ncal_storage_nodes=1}\n[2015-07-08 08:40:55,944][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] collecting local files for [index_a][3] [4]\n[2015-07-08 08:40:55,946][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][3] starting recovery from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_lo\ncal_storage_nodes=1}\n[2015-07-08 08:40:56,315][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][3] Got exception on recovery\norg.elasticsearch.transport.RemoteTransportException: [elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]][internal:index/shard/recovery/start_recovery]\nCaused by: org.elasticsearch.index.engine.RecoveryEngineException: [index_a][3] Phase[1] Execution failed\n at org.elasticsearch.index.engine.InternalEngine.recover(InternalEngine.java:898)\n at org.elasticsearch.index.shard.IndexShard.recover(IndexShard.java:780)\n at org.elasticsearch.indices.recovery.RecoverySource.recover(RecoverySource.java:125)\n at org.elasticsearch.indices.recovery.RecoverySource.access$200(RecoverySource.java:49)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:146)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:132)\n at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.doRun(MessageChannelHandler.java:279)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:36)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at 
java.lang.Thread.run(Thread.java:745)\nCaused by: org.elasticsearch.indices.recovery.RecoverFilesRecoveryException: [index_a][3] Failed to transfer [0] files with total size of [0b]\n at org.elasticsearch.indices.recovery.RecoverySourceHandler.phase1(RecoverySourceHandler.java:431)\n at org.elasticsearch.index.engine.InternalEngine.recover(InternalEngine.java:893)\n ... 10 more\nCaused by: java.lang.IllegalStateException: try to recover [index_a][3] from primary shard with sync id but number of docs differ: 12599260 (elasticsearch-nodes1.localdomain, primary) vs 0(elasticsearch-nodes2.localdomain)\n at org.elasticsearch.indices.recovery.RecoverySourceHandler.phase1(RecoverySourceHandler.java:177)\n ... 11 more\n[2015-07-08 08:40:56,316][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][3] failing recovery from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loc\nal_storage_nodes=1}, id [4]. Send shard failure: [true]\n[2015-07-08 08:40:56,316][WARN ][indices.cluster ] [elasticsearch-nodes2.localdomain] [[index_a][3]] marking and sending shard failed due to [failed recovery]\norg.elasticsearch.indices.recovery.RecoveryFailedException: [index_a][3]: Recovery failed from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1} into [elasticse\narch-nodes2.localdomain][t7ZN91B7Se2qT6NsRRau5g][elasticsearch-nodes2.localdomain][inet[/10.210.14.19:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n at org.elasticsearch.indices.recovery.RecoveryTarget.doRecovery(RecoveryTarget.java:280)\n at org.elasticsearch.indices.recovery.RecoveryTarget.access$700(RecoveryTarget.java:70)\n at org.elasticsearch.indices.recovery.RecoveryTarget$RecoveryRunner.doRun(RecoveryTarget.java:561)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:36)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.elasticsearch.transport.RemoteTransportException: [elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]][internal:index/shard/recovery/start_recovery]\nCaused by: org.elasticsearch.index.engine.RecoveryEngineException: [index_a][3] Phase[1] Execution failed\n at org.elasticsearch.index.engine.InternalEngine.recover(InternalEngine.java:898)\n at org.elasticsearch.index.shard.IndexShard.recover(IndexShard.java:780)\n at org.elasticsearch.indices.recovery.RecoverySource.recover(RecoverySource.java:125)\n at org.elasticsearch.indices.recovery.RecoverySource.access$200(RecoverySource.java:49)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:146)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:132)\n at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.doRun(MessageChannelHandler.java:279)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:36)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at 
java.lang.Thread.run(Thread.java:745)\nCaused by: org.elasticsearch.indices.recovery.RecoverFilesRecoveryException: [index_a][3] Failed to transfer [0] files with total size of [0b]\n at org.elasticsearch.indices.recovery.RecoverySourceHandler.phase1(RecoverySourceHandler.java:431)\n at org.elasticsearch.index.engine.InternalEngine.recover(InternalEngine.java:893)\n ... 10 more\nCaused by: java.lang.IllegalStateException: try to recover [index_a][3] from primary shard with sync id but number of docs differ: 12599260 (elasticsearch-nodes1.localdomain, primary) vs 0(elasticsearch-nodes2.localdomain)\n at org.elasticsearch.indices.recovery.RecoverySourceHandler.phase1(RecoverySourceHandler.java:177)\n ... 11 more\n[2015-07-08 08:41:00,460][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][2] marking recovery from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loc\nal_storage_nodes=1} as done, id [3]\n[2015-07-08 08:41:00,461][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_b][2] recovery completed from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_l\nocal_storage_nodes=1}, took[5s]\n phase1: recovered_files [0] with total_size of [0b], took [1ms], throttling_wait [0s]\n : reusing_files [0] with total_size of [0b]\n phase2: start took [159ms]\n : recovered [0] transaction log operations, took [0s]\n phase3: recovered [0] transaction log operations, took [2ms]\n[2015-07-08 08:41:00,461][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89067\n[2015-07-08 08:41:00,487][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] starting recovery to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}, mark_as_relocated false\n[2015-07-08 08:41:00,489][INFO ][indices.recovery ] [elasticsearch-nodes2.localdomain] Recovery with sync ID 0 numDocs: 12635618 vs. 
true\n[2015-07-08 08:41:00,501][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89068\n[2015-07-08 08:41:00,520][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][4] started recovery from [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_loca\nl_storage_nodes=1}, id [5]\n[2015-07-08 08:41:00,520][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] collecting local files for [index_a][4] [5]\n[2015-07-08 08:41:00,522][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][4] starting recovery from [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_loc\nal_storage_nodes=1}\n[2015-07-08 08:41:01,589][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][4] Got exception on recovery\norg.elasticsearch.transport.RemoteTransportException: [elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]][internal:index/shard/recovery/start_recovery]\nCaused by: org.elasticsearch.index.engine.RecoveryEngineException: [index_a][4] Phase[1] Execution failed\n at org.elasticsearch.index.engine.InternalEngine.recover(InternalEngine.java:898)\n at org.elasticsearch.index.shard.IndexShard.recover(IndexShard.java:780)\n at org.elasticsearch.indices.recovery.RecoverySource.recover(RecoverySource.java:125)\n at org.elasticsearch.indices.recovery.RecoverySource.access$200(RecoverySource.java:49)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:146)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:132)\n at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.doRun(MessageChannelHandler.java:279)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:36)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.elasticsearch.indices.recovery.RecoverFilesRecoveryException: [index_a][4] Failed to transfer [0] files with total size of [0b]\n at org.elasticsearch.indices.recovery.RecoverySourceHandler.phase1(RecoverySourceHandler.java:431)\n at org.elasticsearch.index.engine.InternalEngine.recover(InternalEngine.java:893)\n ... 10 more\nCaused by: java.lang.IllegalStateException: try to recover [index_a][4] from primary shard with sync id but number of docs differ: 12533048 (elasticsearch-nodes3.localdomain, primary) vs 0(elasticsearch-nodes2.localdomain)\n at org.elasticsearch.indices.recovery.RecoverySourceHandler.phase1(RecoverySourceHandler.java:177)\n ... 11 more\n[2015-07-08 08:41:01,590][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][4] failing recovery from [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_loca\nl_storage_nodes=1}, id [5]. 
Send shard failure: [true]\n[2015-07-08 08:41:01,590][WARN ][indices.cluster ] [elasticsearch-nodes2.localdomain] [[index_a][4]] marking and sending shard failed due to [failed recovery]\norg.elasticsearch.indices.recovery.RecoveryFailedException: [index_a][4]: Recovery failed from [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1} into [elasticsea\nrch-nodes2.localdomain][t7ZN91B7Se2qT6NsRRau5g][elasticsearch-nodes2.localdomain][inet[/10.210.14.19:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n at org.elasticsearch.indices.recovery.RecoveryTarget.doRecovery(RecoveryTarget.java:280)\n at org.elasticsearch.indices.recovery.RecoveryTarget.access$700(RecoveryTarget.java:70)\n at org.elasticsearch.indices.recovery.RecoveryTarget$RecoveryRunner.doRun(RecoveryTarget.java:561)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:36)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.elasticsearch.transport.RemoteTransportException: [elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]][internal:index/shard/recovery/start_recovery]\nCaused by: org.elasticsearch.index.engine.RecoveryEngineException: [index_a][4] Phase[1] Execution failed\n at org.elasticsearch.index.engine.InternalEngine.recover(InternalEngine.java:898)\n at org.elasticsearch.index.shard.IndexShard.recover(IndexShard.java:780)\n at org.elasticsearch.indices.recovery.RecoverySource.recover(RecoverySource.java:125)\n at org.elasticsearch.indices.recovery.RecoverySource.access$200(RecoverySource.java:49)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:146)\n at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:132)\n at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.doRun(MessageChannelHandler.java:279)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:36)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.elasticsearch.indices.recovery.RecoverFilesRecoveryException: [index_a][4] Failed to transfer [0] files with total size of [0b]\n at org.elasticsearch.indices.recovery.RecoverySourceHandler.phase1(RecoverySourceHandler.java:431)\n at org.elasticsearch.index.engine.InternalEngine.recover(InternalEngine.java:893)\n ... 10 more\nCaused by: java.lang.IllegalStateException: try to recover [index_a][4] from primary shard with sync id but number of docs differ: 12533048 (elasticsearch-nodes3.localdomain, primary) vs 0(elasticsearch-nodes2.localdomain)\n at org.elasticsearch.indices.recovery.RecoverySourceHandler.phase1(RecoverySourceHandler.java:177)\n ... 
11 more\n[2015-07-08 08:41:05,539][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89069\n[2015-07-08 08:41:05,569][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89070\n[2015-07-08 08:41:05,587][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89071\n[2015-07-08 08:41:05,601][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][0] started recovery from [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_loca\nl_storage_nodes=1}, id [6]\n[2015-07-08 08:41:05,601][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] collecting local files for [index_a][0] [6]\n[2015-07-08 08:41:05,601][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][0] starting recovery from [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_loc\nal_storage_nodes=1}\n[2015-07-08 08:41:05,606][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] starting recovery to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}, mark_as_relocated false\n[2015-07-08 08:41:05,607][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] recovery [phase1] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: recovering [segments_1], does not exists in remote\n[2015-07-08 08:41:05,607][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] recovery [phase1] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: recovering_files [1] with total_size [79b], reusing_files [0] with total_size [0b]\n[2015-07-08 08:41:05,634][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] starting recovery to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}, mark_as_relocated false\n[2015-07-08 08:41:05,636][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase1] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}: recovering [_28.cfe], does not exists in remote\n[2015-07-08 08:41:05,636][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase1] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}: recovering [_28.si], does not exists in remote\n[2015-07-08 08:41:05,637][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase1] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, 
max_loca\nl_storage_nodes=1}: recovering [_28.cfs], does not exists in remote\n[2015-07-08 08:41:05,637][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase1] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}: recovering [_29.si], does not exists in remote\n[2015-07-08 08:41:05,637][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase1] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}: recovering [_29.cfs], does not exists in remote\n[2015-07-08 08:41:05,637][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase1] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}: recovering [_29.cfe], does not exists in remote\n[2015-07-08 08:41:05,637][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase1] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}: recovering [_2b.si], does not exists in remote\n[2015-07-08 08:41:05,637][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase1] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}: recovering [_2b.cfe], does not exists in remote\n[2015-07-08 08:41:05,637][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase1] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}: recovering [_2b.cfs], does not exists in remote\n[2015-07-08 08:41:05,637][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase1] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}: recovering [_2a.cfs], does not exists in remote\n[2015-07-08 08:41:05,637][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase1] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}: recovering [_2a.cfe], does not exists in remote\n[2015-07-08 08:41:05,637][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase1] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}: recovering [_2a.si], does not exists in remote\n[2015-07-08 08:41:05,637][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase1] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, 
max_loca\nl_storage_nodes=1}: recovering [segments_f], does not exists in remote\n[2015-07-08 08:41:05,637][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase1] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}: recovering_files [13] with total_size [5.3mb], reusing_files [0] with total_size [0b]\n[2015-07-08 08:41:05,638][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89072\n[2015-07-08 08:41:05,653][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][3] started recovery from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loc\nal_storage_nodes=1}, id [7]\n[2015-07-08 08:41:05,653][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] collecting local files for [index_a][3] [7]\n[2015-07-08 08:41:05,655][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][3] starting recovery from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_lo\ncal_storage_nodes=1}\n[2015-07-08 08:41:05,809][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89073\n[2015-07-08 08:41:06,045][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase1] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}: took [410.1ms]\n[2015-07-08 08:41:06,045][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase2] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}: start\n[2015-07-08 08:41:06,054][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase2] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}: start took [8.3ms]\n[2015-07-08 08:41:06,054][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase2] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}: updating current mapping to master\n[2015-07-08 08:41:06,057][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase2] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}: sending transaction log operations\n[2015-07-08 08:41:06,077][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] recovery [phase1] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: took [471.4ms]\n[2015-07-08 08:41:06,078][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] 
recovery [phase2] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: start\n[2015-07-08 08:41:06,082][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] recovery [phase2] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: start took [3.5ms]\n[2015-07-08 08:41:06,082][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] recovery [phase2] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: updating current mapping to master\n[2015-07-08 08:41:06,085][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] recovery [phase2] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: sending transaction log operations\n[2015-07-08 08:41:06,094][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][241kb] (total: [18462], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain\n][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:06,131][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][240.3kb] (total: [55022], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:06,349][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][240.4kb] (total: [19222], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdoma\nin][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:06,473][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][240.8kb] (total: [56302], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:06,506][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][240.3kb] (total: [19792], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdoma\nin][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:06,634][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][241.5kb] (total: [20205], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdoma\nin][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:06,670][TRACE][indices.recovery ] 
[elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][240.5kb] (total: [56735], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:06,742][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][240.9kb] (total: [20205], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdoma\nin][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:06,789][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][245.1kb] (total: [56735], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:06,833][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][241.7kb] (total: [20205], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdoma\nin][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:06,877][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][242kb] (total: [56832], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain\n][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:06,924][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][241.5kb] (total: [20640], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdoma\nin][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:06,963][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][242.2kb] (total: [57160], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,019][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][240.7kb] (total: [21338], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdoma\nin][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,043][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][242.3kb] (total: [57992], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,107][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][243kb] (total: [21499], id: [1436250922082]) translog operations to 
[elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain\n][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,111][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][242.5kb] (total: [57992], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,172][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][242.2kb] (total: [57992], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,192][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][241.7kb] (total: [21499], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdoma\nin][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,234][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.3kb] (total: [57992], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,278][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][242.2kb] (total: [21499], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdoma\nin][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,294][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.6kb] (total: [57992], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,356][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.5kb] (total: [58611], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,365][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][241.7kb] (total: [22220], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdoma\nin][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,485][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][241.7kb] (total: [22528], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdoma\nin][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, 
max_local_storage_nodes=1}\n[2015-07-08 08:41:07,487][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][240.8kb] (total: [59122], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,555][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.9kb] (total: [59661], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,571][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][242.2kb] (total: [23207], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdoma\nin][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,620][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.7kb] (total: [59661], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,657][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][241.4kb] (total: [23207], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdoma\nin][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,685][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][240.6kb] (total: [59661], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,741][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][242kb] (total: [23207], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain\n][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,749][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][242.3kb] (total: [59661], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,808][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.6kb] (total: [59661], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,825][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][241.9kb] 
(total: [23207], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdoma\nin][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,909][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][242.1kb] (total: [23567], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdoma\nin][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,953][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241kb] (total: [60497], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain\n][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:07,990][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending final batch of [439][200.2kb] (total: [24338], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.loca\nldomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,039][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][242.1kb] (total: [61334], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,051][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase2] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}: took [1.9s]\n[2015-07-08 08:41:08,051][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase3] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}: sending transaction log operations\n[2015-07-08 08:41:08,062][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][352.6kb] (total: [24875], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdoma\nin][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,097][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][242.4kb] (total: [24875], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdoma\nin][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,103][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.3kb] (total: [61334], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, 
max_local_storage_nodes=1}\n[2015-07-08 08:41:08,129][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][301.7kb] (total: [24875], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdoma\nin][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,157][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][241.6kb] (total: [24875], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdoma\nin][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,164][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241kb] (total: [61334], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain\n][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,183][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][241kb] (total: [24875], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain\n][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,209][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending batch of [1000][241.5kb] (total: [24875], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdoma\nin][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,225][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][242kb] (total: [61334], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain\n][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,287][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.5kb] (total: [61334], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,295][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] sending final batch of [436][105.5kb] (total: [24875], id: [1436250922082]) translog operations to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.loca\nldomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,349][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.9kb] (total: [61334], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,413][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of 
[1000][241kb] (total: [61334], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain\n][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,476][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241kb] (total: [61991], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain\n][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,500][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][5] recovery [phase3] to [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loca\nl_storage_nodes=1}: took [448.8ms]\n[2015-07-08 08:41:08,562][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][242.1kb] (total: [62686], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,637][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.1kb] (total: [62686], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,707][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.4kb] (total: [62686], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,775][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][240.9kb] (total: [62686], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,841][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.1kb] (total: [62686], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,905][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][242kb] (total: [62686], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain\n][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:08,969][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][242.8kb] (total: [62686], id: [1436337649307]) translog operations to 
[elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:09,030][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.3kb] (total: [62686], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:09,167][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.4kb] (total: [62686], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:09,243][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][242.2kb] (total: [62686], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:09,256][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89074\n[2015-07-08 08:41:09,307][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89075\n[2015-07-08 08:41:09,310][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241kb] (total: [62686], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain\n][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:09,386][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][239.9kb] (total: [62686], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:09,445][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][240.8kb] (total: [62686], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:09,720][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][240.4kb] (total: [62854], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:09,810][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241kb] (total: [63120], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain\n][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:09,895][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] 
sending batch of [1000][241.7kb] (total: [63977], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:09,979][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][242.1kb] (total: [63977], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:10,063][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.8kb] (total: [63977], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:10,145][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][242.3kb] (total: [63977], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:10,268][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241kb] (total: [64410], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain\n][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:10,499][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.8kb] (total: [65269], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:10,630][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.9kb] (total: [65277], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:10,773][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89076\n[2015-07-08 08:41:10,799][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.6kb] (total: [65305], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:10,893][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.4kb] (total: [65341], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:10,975][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][242kb] 
(total: [65363], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain\n][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:11,080][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][242.1kb] (total: [65390], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:11,189][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.1kb] (total: [65390], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:11,294][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending final batch of [995][370.1kb] (total: [65390], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.loca\nldomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:11,393][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] recovery [phase2] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: took [5.3s]\n[2015-07-08 08:41:11,393][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] recovery [phase3] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: sending transaction log operations\n[2015-07-08 08:41:11,399][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][299.2kb] (total: [65390], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:11,406][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][3] marking recovery from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_loc\nal_storage_nodes=1} as done, id [7]\n[2015-07-08 08:41:11,406][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][3] recovery completed from [elasticsearch-nodes1.localdomain][fX9BOXiSR5asrQOg9F8L8g][elasticsearch-nodes1.localdomain][inet[/10.210.14.138:9300]]{aws_availability_zone=eu-west-1c, max_l\nocal_storage_nodes=1}, took[5.7s]\n phase1: recovered_files [12] with total_size of [6.9mb], took [992ms], throttling_wait [0s]\n : reusing_files [0] with total_size of [0b]\n phase2: start took [13ms]\n : recovered [38341] transaction log operations, took [3.4s]\n phase3: recovered [7286] transaction log operations, took [881ms]\n[2015-07-08 08:41:11,409][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89077\n[2015-07-08 08:41:11,440][TRACE][indices.recovery ] 
[elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][242.4kb] (total: [65390], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:11,479][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][304.5kb] (total: [65390], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:11,521][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][242kb] (total: [65390], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain\n][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:11,561][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.9kb] (total: [65390], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:11,597][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][242.1kb] (total: [65390], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:11,637][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][241.8kb] (total: [65390], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:11,674][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][354.1kb] (total: [65390], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:11,714][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][242kb] (total: [65390], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain\n][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:11,743][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending batch of [1000][535.3kb] (total: [65390], id: [1436337649307]) translog operations to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdoma\nin][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:11,773][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] sending final batch of [395][991.8kb] (total: [65390], id: [1436337649307]) translog operations to 
[elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.loca\nldomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local_storage_nodes=1}\n[2015-07-08 08:41:12,092][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][1] recovery [phase3] to [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_local\n_storage_nodes=1}: took [699.4ms]\n[2015-07-08 08:41:12,684][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89078\n[2015-07-08 08:41:12,859][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][0] marking recovery from [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_loca\nl_storage_nodes=1} as done, id [6]\n[2015-07-08 08:41:12,859][TRACE][indices.recovery ] [elasticsearch-nodes2.localdomain] [index_a][0] recovery completed from [elasticsearch-nodes3.localdomain][_WBKY5VgSPScb2rcEM8Ejw][elasticsearch-nodes3.localdomain][inet[/10.210.14.50:9300]]{aws_availability_zone=eu-west-1a, max_lo\ncal_storage_nodes=1}, took[7.2s]\n phase1: recovered_files [1] with total_size of [108b], took [13ms], throttling_wait [0s]\n : reusing_files [0] with total_size of [0b]\n phase2: start took [4ms]\n : recovered [54870] transaction log operations, took [5.8s]\n phase3: recovered [10795] transaction log operations, took [668ms]\n[2015-07-08 08:41:12,862][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89079\n[2015-07-08 08:41:12,934][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89080\n[2015-07-08 08:41:19,194][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89081\n[2015-07-08 08:46:20,640][DEBUG][discovery.zen.publish ] [elasticsearch-nodes2.localdomain] received cluster state version 89082\n```\n", "created_at": "2015-07-09T15:26:04Z" }, { "body": "Hi @womwombat \n\nWe had a long talk about this internally, and need some more info to track down what is happening here. Please could you give us hot threads output while you see the \"failed to lock\" message happening, as follows:\n\n```\ncurl -XGET \"http://localhost:9202/_nodes/hot_threads?threads=10000&ignore_idle_threads=false\"\n```\n- Could we have the logs from the moment you start the optimize, through closing the index, and reopening the other index.\n- Do you use scroll requests? If so, what scroll timeout do you set?\n- Could you try removing optimize from your process and see if that helps?\n- Another wild suggestion - do your servers use Haswell CPUs? 
Wondering if you are suffering from the futex bug https://github.com/elastic/elasticsearch/issues/11526\n", "created_at": "2015-07-17T10:41:02Z" }, { "body": "Hi,\n- The log below is the only log I have; maybe you want TRACE from something else?\n- we don't use scroll requests\n- we can't right now, but we'll do this next week\n- our servers are not using Haswell CPUs\n- We have migrated all the nodes from AWS to brand-new on-premises servers with 64 GB RAM and 256 GB SSDs, and the problem still occurs in 1.7.0\n- here is a full log of one of the events yesterday\n\n```\n[2015-08-06 19:36:37,742][WARN ][indices.cluster ] [node-01] [[my_custom_index_b][4]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [my_custom_index_b][4] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [my_custom_index_b][4], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:582)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:510)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 
9 more\n[2015-08-06 19:36:47,848][WARN ][indices.cluster ] [node-01] [[my_custom_index_b][0]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [my_custom_index_b][0] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [my_custom_index_b][0], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:582)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:510)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 9 more\n[2015-08-06 19:36:57,931][WARN ][indices.cluster ] [node-01] [[my_custom_index_b][4]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [my_custom_index_b][4] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [my_custom_index_b][4], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:582)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:510)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 
9 more\n\n----\n\nThere're 3000 lines of this error, I've skipped them for readability\n\n----\n\n\n[2015-08-06 20:00:15,485][WARN ][indices.cluster ] [node-01] [[my_custom_index_b][0]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [my_custom_index_b][0] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [my_custom_index_b][0], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:582)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:510)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 
9 more\n[2015-08-06 20:00:25,722][WARN ][indices.cluster ] [node-01] [[my_custom_index_b][0]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [my_custom_index_b][0] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [my_custom_index_b][0], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:582)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:510)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 9 more\n[2015-08-06 20:00:35,742][WARN ][indices.cluster ] [node-01] [[my_custom_index_b][0]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [my_custom_index_b][0] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [my_custom_index_b][0], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:582)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:510)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 
9 more\n[2015-08-06 20:00:38,747][DEBUG][action.bulk ] [node-01] observer: timeout notification from cluster service. timeout setting [1m], time since start [1m]\n[2015-08-06 20:00:44,367][INFO ][node ] [node-01] stopping ...\n[2015-08-06 20:00:44,396][WARN ][netty.channel.DefaultChannelPipeline] An exception was thrown by an exception handler.\njava.util.concurrent.RejectedExecutionException: Worker has already been shutdown\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioSelector.registerTask(AbstractNioSelector.java:120)\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioWorker.executeInIoThread(AbstractNioWorker.java:72)\n at org.elasticsearch.common.netty.channel.socket.nio.NioWorker.executeInIoThread(NioWorker.java:36)\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioWorker.executeInIoThread(AbstractNioWorker.java:56)\n at org.elasticsearch.common.netty.channel.socket.nio.NioWorker.executeInIoThread(NioWorker.java:36)\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioChannelSink.execute(AbstractNioChannelSink.java:34)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.execute(DefaultChannelPipeline.java:636)\n at org.elasticsearch.common.netty.channel.Channels.fireExceptionCaughtLater(Channels.java:496)\n at org.elasticsearch.common.netty.channel.AbstractChannelSink.exceptionCaught(AbstractChannelSink.java:46)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.notifyHandlerException(DefaultChannelPipeline.java:658)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendDownstream(DefaultChannelPipeline.java:781)\n at org.elasticsearch.common.netty.channel.Channels.write(Channels.java:725)\n at org.elasticsearch.common.netty.handler.codec.oneone.OneToOneEncoder.doEncode(OneToOneEncoder.java:71)\n at org.elasticsearch.common.netty.handler.codec.oneone.OneToOneEncoder.handleDownstream(OneToOneEncoder.java:59)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:591)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendDownstream(DefaultChannelPipeline.java:784)\n at org.elasticsearch.common.netty.handler.codec.http.HttpContentEncoder.writeRequested(HttpContentEncoder.java:138)\n at org.elasticsearch.common.netty.channel.SimpleChannelHandler.handleDownstream(SimpleChannelHandler.java:254)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:591)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendDownstream(DefaultChannelPipeline.java:784)\n at org.elasticsearch.http.netty.pipelining.HttpPipeliningHandler.handleDownstream(HttpPipeliningHandler.java:87)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:591)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:582)\n at org.elasticsearch.http.netty.NettyHttpChannel.sendResponse(NettyHttpChannel.java:195)\n at org.elasticsearch.rest.action.support.RestResponseListener.processResponse(RestResponseListener.java:43)\n at org.elasticsearch.rest.action.support.RestActionListener.onResponse(RestActionListener.java:49)\n at org.elasticsearch.action.bulk.TransportBulkAction$2.finishHim(TransportBulkAction.java:360)\n at 
org.elasticsearch.action.bulk.TransportBulkAction$2.onFailure(TransportBulkAction.java:355)\n at org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$PrimaryPhase.finishAsFailed(TransportShardReplicationOperationAction.java:536)\n at org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$PrimaryPhase$3.onClusterServiceClose(TransportShardReplicationOperationAction.java:509)\n at org.elasticsearch.cluster.ClusterStateObserver$ObserverClusterStateListener.onClose(ClusterStateObserver.java:217)\n at org.elasticsearch.cluster.service.InternalClusterService.doStop(InternalClusterService.java:174)\n at org.elasticsearch.common.component.AbstractLifecycleComponent.stop(AbstractLifecycleComponent.java:105)\n at org.elasticsearch.node.internal.InternalNode.stop(InternalNode.java:310)\n at org.elasticsearch.node.internal.InternalNode.close(InternalNode.java:334)\n at org.elasticsearch.bootstrap.Bootstrap$1.run(Bootstrap.java:82)\n[2015-08-06 20:00:44,400][WARN ][netty.channel.DefaultChannelPipeline] An exception was thrown by an exception handler.\njava.util.concurrent.RejectedExecutionException: Worker has already been shutdown\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioSelector.registerTask(AbstractNioSelector.java:120)\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioWorker.executeInIoThread(AbstractNioWorker.java:72)\n at org.elasticsearch.common.netty.channel.socket.nio.NioWorker.executeInIoThread(NioWorker.java:36)\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioWorker.executeInIoThread(AbstractNioWorker.java:56)\n at org.elasticsearch.common.netty.channel.socket.nio.NioWorker.executeInIoThread(NioWorker.java:36)\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioChannelSink.execute(AbstractNioChannelSink.java:34)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.execute(DefaultChannelPipeline.java:636)\n at org.elasticsearch.common.netty.channel.Channels.fireExceptionCaughtLater(Channels.java:496)\n at org.elasticsearch.common.netty.channel.AbstractChannelSink.exceptionCaught(AbstractChannelSink.java:46)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.notifyHandlerException(DefaultChannelPipeline.java:658)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendDownstream(DefaultChannelPipeline.java:781)\n at org.elasticsearch.common.netty.channel.Channels.write(Channels.java:725)\n at org.elasticsearch.common.netty.handler.codec.oneone.OneToOneEncoder.doEncode(OneToOneEncoder.java:71)\n at org.elasticsearch.common.netty.handler.codec.oneone.OneToOneEncoder.handleDownstream(OneToOneEncoder.java:59)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:591)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendDownstream(DefaultChannelPipeline.java:784)\n at org.elasticsearch.common.netty.handler.codec.http.HttpContentEncoder.writeRequested(HttpContentEncoder.java:138)\n at org.elasticsearch.common.netty.channel.SimpleChannelHandler.handleDownstream(SimpleChannelHandler.java:254)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:591)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendDownstream(DefaultChannelPipeline.java:784)\n at 
org.elasticsearch.http.netty.pipelining.HttpPipeliningHandler.handleDownstream(HttpPipeliningHandler.java:87)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:591)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:582)\n at org.elasticsearch.http.netty.NettyHttpChannel.sendResponse(NettyHttpChannel.java:195)\n at org.elasticsearch.rest.action.support.RestResponseListener.processResponse(RestResponseListener.java:43)\n at org.elasticsearch.rest.action.support.RestActionListener.onResponse(RestActionListener.java:49)\n at org.elasticsearch.action.search.type.TransportSearchQueryThenFetchAction$AsyncAction$2.doRun(TransportSearchQueryThenFetchAction.java:152)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:36)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\n[2015-08-06 20:00:44,425][WARN ][netty.channel.DefaultChannelPipeline] An exception was thrown by an exception handler.\njava.util.concurrent.RejectedExecutionException: Worker has already been shutdown\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioSelector.registerTask(AbstractNioSelector.java:120)\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioWorker.executeInIoThread(AbstractNioWorker.java:72)\n at org.elasticsearch.common.netty.channel.socket.nio.NioWorker.executeInIoThread(NioWorker.java:36)\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioWorker.executeInIoThread(AbstractNioWorker.java:56)\n at org.elasticsearch.common.netty.channel.socket.nio.NioWorker.executeInIoThread(NioWorker.java:36)\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioChannelSink.execute(AbstractNioChannelSink.java:34)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.execute(DefaultChannelPipeline.java:636)\n at org.elasticsearch.common.netty.channel.Channels.fireExceptionCaughtLater(Channels.java:496)\n at org.elasticsearch.common.netty.channel.AbstractChannelSink.exceptionCaught(AbstractChannelSink.java:46)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.notifyHandlerException(DefaultChannelPipeline.java:658)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendDownstream(DefaultChannelPipeline.java:781)\n at org.elasticsearch.common.netty.channel.Channels.write(Channels.java:725)\n at org.elasticsearch.common.netty.handler.codec.oneone.OneToOneEncoder.doEncode(OneToOneEncoder.java:71)\n at org.elasticsearch.common.netty.handler.codec.oneone.OneToOneEncoder.handleDownstream(OneToOneEncoder.java:59)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:591)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendDownstream(DefaultChannelPipeline.java:784)\n at org.elasticsearch.common.netty.handler.codec.http.HttpContentEncoder.writeRequested(HttpContentEncoder.java:138)\n at org.elasticsearch.common.netty.channel.SimpleChannelHandler.handleDownstream(SimpleChannelHandler.java:254)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:591)\n at 
org.elasticsearch.common.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendDownstream(DefaultChannelPipeline.java:784)\n at org.elasticsearch.http.netty.pipelining.HttpPipeliningHandler.handleDownstream(HttpPipeliningHandler.java:87)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:591)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:582)\n at org.elasticsearch.http.netty.NettyHttpChannel.sendResponse(NettyHttpChannel.java:195)\n at org.elasticsearch.rest.action.support.RestResponseListener.processResponse(RestResponseListener.java:43)\n at org.elasticsearch.rest.action.support.RestActionListener.onResponse(RestActionListener.java:49)\n at org.elasticsearch.action.search.type.TransportSearchQueryThenFetchAction$AsyncAction$2.doRun(TransportSearchQueryThenFetchAction.java:152)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:36)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\n[2015-08-06 20:00:45,808][WARN ][indices.cluster ] [node-01] [[my_custom_index_b][0]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [my_custom_index_b][0] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [my_custom_index_b][0], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:582)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:510)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 
9 more\n[2015-08-06 20:00:45,828][WARN ][netty.channel.DefaultChannelPipeline] An exception was thrown by an exception handler.\njava.util.concurrent.RejectedExecutionException: Worker has already been shutdown\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioSelector.registerTask(AbstractNioSelector.java:120)\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioWorker.executeInIoThread(AbstractNioWorker.java:72)\n at org.elasticsearch.common.netty.channel.socket.nio.NioWorker.executeInIoThread(NioWorker.java:36)\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioWorker.executeInIoThread(AbstractNioWorker.java:56)\n at org.elasticsearch.common.netty.channel.socket.nio.NioWorker.executeInIoThread(NioWorker.java:36)\n at org.elasticsearch.common.netty.channel.socket.nio.AbstractNioChannelSink.execute(AbstractNioChannelSink.java:34)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.execute(DefaultChannelPipeline.java:636)\n at org.elasticsearch.common.netty.channel.Channels.fireExceptionCaughtLater(Channels.java:496)\n at org.elasticsearch.common.netty.channel.AbstractChannelSink.exceptionCaught(AbstractChannelSink.java:46)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.notifyHandlerException(DefaultChannelPipeline.java:658)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendDownstream(DefaultChannelPipeline.java:781)\n at org.elasticsearch.common.netty.channel.Channels.write(Channels.java:725)\n at org.elasticsearch.common.netty.handler.codec.oneone.OneToOneEncoder.doEncode(OneToOneEncoder.java:71)\n at org.elasticsearch.common.netty.handler.codec.oneone.OneToOneEncoder.handleDownstream(OneToOneEncoder.java:59)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:591)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendDownstream(DefaultChannelPipeline.java:784)\n at org.elasticsearch.common.netty.handler.codec.http.HttpContentEncoder.writeRequested(HttpContentEncoder.java:138)\n at org.elasticsearch.common.netty.channel.SimpleChannelHandler.handleDownstream(SimpleChannelHandler.java:254)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:591)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendDownstream(DefaultChannelPipeline.java:784)\n at org.elasticsearch.http.netty.pipelining.HttpPipeliningHandler.handleDownstream(HttpPipeliningHandler.java:87)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:591)\n at org.elasticsearch.common.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:582)\n at org.elasticsearch.http.netty.NettyHttpChannel.sendResponse(NettyHttpChannel.java:195)\n at org.elasticsearch.rest.action.support.RestResponseListener.processResponse(RestResponseListener.java:43)\n at org.elasticsearch.rest.action.support.RestActionListener.onResponse(RestActionListener.java:49)\n at org.elasticsearch.action.bulk.TransportBulkAction$2.finishHim(TransportBulkAction.java:360)\n at org.elasticsearch.action.bulk.TransportBulkAction$2.onFailure(TransportBulkAction.java:355)\n at org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$PrimaryPhase.finishAsFailed(TransportShardReplicationOperationAction.java:536)\n at 
org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$PrimaryPhase$3.onClusterServiceClose(TransportShardReplicationOperationAction.java:509)\n at org.elasticsearch.cluster.ClusterStateObserver$ObserverClusterStateListener.onClose(ClusterStateObserver.java:217)\n at org.elasticsearch.cluster.service.InternalClusterService.add(InternalClusterService.java:236)\n at org.elasticsearch.cluster.ClusterStateObserver.waitForNextChange(ClusterStateObserver.java:146)\n at org.elasticsearch.cluster.ClusterStateObserver.waitForNextChange(ClusterStateObserver.java:96)\n at org.elasticsearch.cluster.ClusterStateObserver.waitForNextChange(ClusterStateObserver.java:88)\n at org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$PrimaryPhase.retry(TransportShardReplicationOperationAction.java:501)\n at org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$PrimaryPhase.retryBecauseUnavailable(TransportShardReplicationOperationAction.java:655)\n at org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$PrimaryPhase.doRun(TransportShardReplicationOperationAction.java:362)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:36)\n at org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$PrimaryPhase$3.onNewClusterState(TransportShardReplicationOperationAction.java:504)\n at org.elasticsearch.cluster.ClusterStateObserver$ObserverClusterStateListener.postAdded(ClusterStateObserver.java:201)\n at org.elasticsearch.cluster.service.InternalClusterService$1.run(InternalClusterService.java:248)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\n[2015-08-06 20:00:46,409][INFO ][node ] [node-01] stopped\n[2015-08-06 20:00:46,409][INFO ][node ] [node-01] closing ...\n[2015-08-06 20:00:56,428][WARN ][cluster.action.index ] [node-01] [my_custom_index_b] failed to lock all shards for index - timed out after 30 seconds\n[2015-08-06 20:00:56,440][INFO ][node ] [node-01] closed\n[2015-08-06 20:00:58,249][INFO ][node ] [node-01] version[1.7.0], pid[27455], build[929b973/2015-07-16T14:31:07Z]\n[2015-08-06 20:00:58,249][INFO ][node ] [node-01] initializing ...\n[2015-08-06 20:00:58,362][INFO ][plugins ] [node-01] loaded [], sites [HQ, kopf, whatson]\n[2015-08-06 20:00:58,411][INFO ][env ] [node-01] using [1] data paths, mounts [[/srv (/dev/sdb)]], net usable_space [219.4gb], net total_space [237.7gb], types [xfs]\n[2015-08-06 20:01:01,277][INFO ][node ] [node-01] initialized\n[2015-08-06 20:01:01,278][INFO ][node ] [node-01] starting ...\n[2015-08-06 20:01:01,485][INFO ][transport ] [node-01] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/192.168.1.211:9300]}\n[2015-08-06 20:01:01,502][INFO ][discovery ] [node-01] my_cluster/65f-xeTqT3msqPDbYlbtew\n[2015-08-06 20:01:11,165][INFO ][cluster.service ] [node-01] detected_master [node-02][-_LoaCn7TaqMjAkRDaHTQA][node-02][inet[/192.168.1.212:9300]], added 
{[node-02][-_LoaCn7TaqMjAkRDaHTQA][node-02][inet[/192.168.1.212:9300]],[node-03][zdxOnF2qQrG1rJ4czVMXUA][node-03][inet[/192.168.1.213:9300]],}, reason: zen-disco-receive(from master [[node-02][-_LoaCn7TaqMjAkRDaHTQA][node-02][inet[/192.168.1.212:9300]]])\n[2015-08-06 20:01:11,235][INFO ][http ] [node-01] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/192.168.1.211:9200]}\n[2015-08-06 20:01:11,235][INFO ][node ] [node-01] started\n```\n", "created_at": "2015-08-07T08:49:54Z" }, { "body": "Hi @womwombat \n\nThanks for the logs. Restarting was the right thing to do here. Could you look back in the logs on that server to before the problem started, to find out what triggered this issue?\n", "created_at": "2015-08-07T11:28:54Z" }, { "body": "Hi,\n\nThanks for your help :)\n\nThe log above was from node-01; here is the log from node-02 (it looks like it contains what you're looking for)\n\n```\n[2015-08-06 19:36:02,594][INFO ][cluster.metadata ] [node-02] [my_custom_index_b] deleting index\n[2015-08-06 19:36:32,663][INFO ][cluster.metadata ] [node-02] [my_custom_index_b] creating index, cause [api], templates [], shards [6]/[1], mappings []\n[2015-08-06 19:36:37,745][WARN ][cluster.action.shard ] [node-02] [my_custom_index_b][4] received shard failed for [my_custom_index_b][4], node[n8Ry5JsZT4WiXqr16ux4dg], [P], s[INITIALIZING], unassigned_info[[reason=INDEX_CREATED], at[2015-08-06T17:36:32.664Z]], indexUUID [mcr4fgrtRwaHgITxhd3qrg], reason [shard failure [failed to create shard][IndexShardCreationException[[my_custom_index_b][4] failed to create shard]; nested: LockObtainFailedException[Can't lock shard [my_custom_index_b][4], timed out after 5000ms]; ]]\n[2015-08-06 19:36:42,803][WARN ][indices.cluster ] [node-02] [[my_custom_index_b][0]] marking and sending shard failed due to [failed to create shard]\norg.elasticsearch.index.shard.IndexShardCreationException: [my_custom_index_b][0] failed to create shard\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:357)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:704)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:605)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:185)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:480)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:188)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:158)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [my_custom_index_b][0], timed out after 5000ms\n at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:582)\n at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:510)\n at org.elasticsearch.index.IndexService.createShard(IndexService.java:310)\n ... 
9 more\n```\n\nAnd on node-03:\n\n```\n[2015-08-06 19:38:27,714][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:39:29,515][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:40:29,701][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:41:30,813][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:41:30,928][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:42:31,519][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:42:31,600][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:43:32,715][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:44:32,801][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:44:32,884][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:45:33,165][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:45:33,165][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:46:33,894][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:46:33,968][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:47:34,267][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:47:34,293][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:48:34,505][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:48:34,577][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:49:35,501][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:52:36,197][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:53:36,611][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:54:36,775][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. 
timeout setting [1m], time since start [1m]\n[2015-08-06 19:58:41,294][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 19:59:38,439][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 20:00:42,834][DEBUG][action.bulk ] [node-03] observer: timeout notification from cluster serv\nice. timeout setting [1m], time since start [1m]\n[2015-08-06 20:00:45,853][INFO ][cluster.service ] [node-03] removed {[node-01][n8Ry5JsZT4WiXqr16ux\n4dg][node-01][inet[/192.168.1.211:9300]],}, reason: zen-disco-receive(from master [[node-02][-_LoaCn7TaqMjA\nkRDaHTQA][node-02][inet[/192.168.1.212:9300]]])\n[2015-08-06 20:01:11,144][INFO ][cluster.service ] [node-03] added {[node-01][65f-xeTqT3msqPDbYlbte\nw][node-01][inet[/192.168.1.211:9300]],}, reason: zen-disco-receive(from master [[node-02][-_LoaCn7TaqMjAkR\nDaHTQA][node-02][inet[/192.168.1.212:9300]]])\n```\n", "created_at": "2015-08-07T11:54:54Z" }, { "body": "So essentially the shards for the new index can't be allocated because the shard from the old index is still hanging around. Something is holding on to it, but not sure what. eg a scroll request would be one option but it could be many things. That hot threads output (while the node is in this state) might help here, plus as much information as you can give us about any exceptions that you see (not necessarily related to allocation) plus queries that you run etc etc.\n", "created_at": "2015-08-07T12:55:36Z" }, { "body": "Hi,\n\nThe hot thread output is rather large (598ko) so I paste it here http://justpaste.it/es-hot-thread\n\nRegards,\n", "created_at": "2015-08-08T11:14:37Z" }, { "body": "I am seeing this same issue on my (small) cluster. I bulk-index about 200M documents with index.refresh_interval:-1 and index.number_of_replicas:0. Shortly after setting index.number_of_replicas:1, I started seeing the failed shard errors. Seems to be repeatable. Let me know if I can send any info that might help.\n", "created_at": "2015-08-17T21:45:33Z" }, { "body": "Even i am seeing the same issue, after upgrade from 1.4 to 1.6 version and made replication 0 from 1. So I upgraded to 1.7, then this issue stopped. But, now cluster keeps going down, by giving \"shard failure\" error and eventually node getting disconnected. 
\n\nHad never seen this issue while it was in 1.4 version.\n", "created_at": "2015-09-02T17:16:20Z" }, { "body": "@rakesh91 @zombiepig01 what exceptions are you seeing in your logs?\n\nare you deleting an index then creating a new index with the same name?\n", "created_at": "2015-09-05T12:05:00Z" }, { "body": "@clintongormley when cluster go down, in master \n\n[2015-09-04 15:40:43,172][WARN ][cluster.action.shard ] [esmaster2] [logstash-2015.09.04][9] received shard failed for [logstash-2015.09.04][9], node[2eOY2_zxRwiNtbZRmVxL5g], [P], s[INITIALIZING], unassigned_info[[reason=CLUSTER_RECOVERED], at[2015-09-04T10:10:34.254Z]], indexUUID [pGvcrpH3SzSlX46yYiqQcw], reason [shard failure [failed to create shard][IndexShardCreationException[[logstash-2015.09.04][9] failed to create shard]; nested: LockObtainFailedException[Can't lock shard [logstash-2015.09.04][9], timed out after 5000ms]; ]]\n\nNo I am not deleting the index and re-create with same name, but i delete 28 days old index.\n\nUpon cluster restart, it will be fine, but few times it will fix on its own\n", "created_at": "2015-09-06T17:16:17Z" }, { "body": "upgraded from 1.4.4 -> 1.7.2\n/_bulk (indexing)\n\nlooping errors : \n\nshard-failed ([INDEX-2015-34][1], node[9xgitQcHRR2kPZ7JYpKBtg], [P], s[INITIALIZING], unassigned_info[[reason=ALLOCATION_FAILED], at[2015-09-16T18:26:29.047Z], details[shard failure [failed to create shard][IndexShardCreationException[[INDEX-2015-34][1] failed to create shard]; nested: LockObtainFailedException[Can't lock shard [INDEX-2015-34][1], timed out after 5000ms]; ]]]), reason [shard failure [failed to create shard][IndexShardCreationException[[INDEX-2015-34][1] failed to create shard]; nested: LockObtainFailedException[Can't lock shard [INDEX-2015-34][1], timed out after 5000ms]; ]] \n", "created_at": "2015-09-16T18:49:38Z" }, { "body": "If you're still seeing this \"LockObtainFailedException\", please get a hot threads dump with the following command:\n\n```\ncurl -XGET -s localhost:9200/_nodes/hot_threads?threads=10000\n```\n\nIn particular, we're looking for output like this:\n\n```\n 0.0% (0s out of 500ms) cpu usage by thread 'elasticsearch[host-23][[index-abc-123][0]: Lucene Merge Thread #111975]'\n 10/10 snapshots sharing following 14 elements\n java.lang.Object.wait(Native Method)\n java.lang.Thread.join(Thread.java:1245)\n java.lang.Thread.join(Thread.java:1319)\n org.apache.lucene.index.ConcurrentMergeScheduler.sync(ConcurrentMergeScheduler.java:281)\n org.apache.lucene.index.ConcurrentMergeScheduler.close(ConcurrentMergeScheduler.java:262)\n org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider$CustomConcurrentMergeScheduler.close(ConcurrentMergeSchedulerProvider.java:141)\n org.elasticsearch.index.merge.EnableMergeScheduler.close(EnableMergeScheduler.java:56)\n org.apache.lucene.index.IndexWriter.rollbackInternal(IndexWriter.java:2114)\n org.apache.lucene.index.IndexWriter.rollback(IndexWriter.java:2089)\n org.apache.lucene.index.IndexWriter.tragicEvent(IndexWriter.java:4686)\n org.apache.lucene.index.IndexWriter.merge(IndexWriter.java:3839)\n org.apache.lucene.index.ConcurrentMergeScheduler.doMerge(ConcurrentMergeScheduler.java:409)\n org.apache.lucene.index.TrackingConcurrentMergeScheduler.doMerge(TrackingConcurrentMergeScheduler.java:107)\n org.apache.lucene.index.ConcurrentMergeScheduler$MergeThread.run(ConcurrentMergeScheduler.java:486)\n```\n\nThis is related to this Lucene bug (https://issues.apache.org/jira/browse/LUCENE-6670) where an OOM is thrown on a merge 
thread.\n", "created_at": "2015-10-08T11:48:21Z" }, { "body": "I seem to be suffering from the same problem. \n\nInterestingly enough, I managed to make it go away on 1.7.4 (https://github.com/elastic/elasticsearch/issues/12926) by removing all nested fields (https://www.elastic.co/guide/en/elasticsearch/reference/2.1/nested.html) from my mappings and reindexing all the data.\n\nI upgraded to 2.0.0 a couple of days ago and then to 2.1.0 yesterday. Today I noticed that the problem returned - could the upgrade have caused it?\n\nI can see shards stuck in INITIALIZING again:\n\n```\n[LIVE] krisba@cubitsearch-client-1:~$ curl localhost:9200/_cat/shards | grep INIT\n % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0joint_user_summary_v1_all 2 r INITIALIZING 10.0.1.238 cubitsearch-4\njoint_user_summary_v1_all 2 r INITIALIZING 10.0.1.239 cubitsearch-5\njoint_user_summary_v1_all 2 r INITIALIZING 10.0.1.236 cubitsearch-2\njoint_user_summary_v1_all 1 r INITIALIZING 10.0.1.237 cubitsearch-3\njoint_user_summary_v1_all 4 r INITIALIZING 10.0.1.238 cubitsearch-4\njoint_user_summary_v1_all 4 r INITIALIZING 10.0.1.239 cubitsearch-5\njoint_user_summary_v1_all 0 r INITIALIZING 10.0.1.237 cubitsearch-3\njoint_user_summary_v1_all 0 r INITIALIZING 10.0.1.236 cubitsearch-2\n```\n\nHere's a log sample from cubitsearch-4:\n\n```\nDec 12 12:53:40 cubitsearch-4 elasticsearch: Caused by: [joint_user_summary_v1_all][[joint_user_summary_v1_all][0]] IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]]\nDec 12 12:53:40 cubitsearch-4 elasticsearch: #011at org.elasticsearch.index.shard.IndexShard.readAllowed(IndexShard.java:974)\nDec 12 12:53:40 cubitsearch-4 elasticsearch: #011at org.elasticsearch.index.shard.IndexShard.acquireSearcher(IndexShard.java:808)\nDec 12 12:53:40 cubitsearch-4 elasticsearch: #011at org.elasticsearch.index.shard.IndexShard.docStats(IndexShard.java:628)\nDec 12 12:53:40 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.admin.indices.stats.CommonStats.<init>(CommonStats.java:131)\nDec 12 12:53:40 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction.shardOperation(TransportIndicesStatsAction.java:165)\nDec 12 12:53:40 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction.shardOperation(TransportIndicesStatsAction.java:47)\nDec 12 12:53:40 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.onShardOperation(TransportBroadcastByNodeAction.java:401)\nDec 12 12:53:40 cubitsearch-4 elasticsearch: #011... 
7 more\nDec 12 12:53:41 cubitsearch-4 elasticsearch: [2015-12-12 12:53:41,541][DEBUG][action.admin.indices.stats] [cubitsearch-4] [indices:monitor/stats] failed to execute operation for shard [[joint_user_summary_v1_all][1], node[3mL6ye16Ru2o05yVzjoUuQ], [R], v[8697], s[INITIALIZING], a[id=KIkQeU0rTCSzZtFPl0YtlQ], unassigned_info[[reason=ALLOCATION_FAILED], at[2015-12-12T12:42:23.164Z], details[failed to create shard, failure ElasticsearchException[failed to create shard]; nested: LockObtainFailedException[Can't lock shard [joint_user_summary_v1_all][1], timed out after 5000ms]; ]]]\nDec 12 12:53:41 cubitsearch-4 elasticsearch: [joint_user_summary_v1_all][[joint_user_summary_v1_all][1]] BroadcastShardOperationFailedException[operation indices:monitor/stats failed]; nested: IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]];\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.onShardOperation(TransportBroadcastByNodeAction.java:405)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:382)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:371)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.doRun(MessageChannelHandler.java:299)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at java.lang.Thread.run(Thread.java:745)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: Caused by: [joint_user_summary_v1_all][[joint_user_summary_v1_all][1]] IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]]\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.index.shard.IndexShard.readAllowed(IndexShard.java:974)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.index.shard.IndexShard.acquireSearcher(IndexShard.java:808)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.index.shard.IndexShard.docStats(IndexShard.java:628)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.admin.indices.stats.CommonStats.<init>(CommonStats.java:131)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction.shardOperation(TransportIndicesStatsAction.java:165)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction.shardOperation(TransportIndicesStatsAction.java:47)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at 
org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.onShardOperation(TransportBroadcastByNodeAction.java:401)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011... 7 more\nDec 12 12:53:41 cubitsearch-4 elasticsearch: [2015-12-12 12:53:41,549][DEBUG][action.admin.indices.stats] [cubitsearch-4] [indices:monitor/stats] failed to execute operation for shard [[joint_user_summary_v1_all][0], node[3mL6ye16Ru2o05yVzjoUuQ], [R], v[8605], s[INITIALIZING], a[id=IQeeXvCCTweMQpf2O-ncWw], unassigned_info[[reason=ALLOCATION_FAILED], at[2015-12-12T12:43:05.921Z], details[failed to create shard, failure ElasticsearchException[failed to create shard]; nested: LockObtainFailedException[Can't lock shard [joint_user_summary_v1_all][0], timed out after 5000ms]; ]]]\nDec 12 12:53:41 cubitsearch-4 elasticsearch: [joint_user_summary_v1_all][[joint_user_summary_v1_all][0]] BroadcastShardOperationFailedException[operation indices:monitor/stats failed]; nested: IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]];\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.onShardOperation(TransportBroadcastByNodeAction.java:405)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:382)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:371)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.doRun(MessageChannelHandler.java:299)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at java.lang.Thread.run(Thread.java:745)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: Caused by: [joint_user_summary_v1_all][[joint_user_summary_v1_all][0]] IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]]\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.index.shard.IndexShard.readAllowed(IndexShard.java:974)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.index.shard.IndexShard.acquireSearcher(IndexShard.java:808)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.index.shard.IndexShard.docStats(IndexShard.java:628)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.admin.indices.stats.CommonStats.<init>(CommonStats.java:131)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction.shardOperation(TransportIndicesStatsAction.java:165)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at 
org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction.shardOperation(TransportIndicesStatsAction.java:47)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.onShardOperation(TransportBroadcastByNodeAction.java:401)\nDec 12 12:53:41 cubitsearch-4 elasticsearch: #011... 7 more\nDec 12 12:53:42 cubitsearch-4 elasticsearch: [2015-12-12 12:53:42,608][DEBUG][action.admin.indices.stats] [cubitsearch-4] [indices:monitor/stats] failed to execute operation for shard [[joint_user_summary_v1_all][1], node[3mL6ye16Ru2o05yVzjoUuQ], [R], v[8697], s[INITIALIZING], a[id=KIkQeU0rTCSzZtFPl0YtlQ], unassigned_info[[reason=ALLOCATION_FAILED], at[2015-12-12T12:42:23.164Z], details[failed to create shard, failure ElasticsearchException[failed to create shard]; nested: LockObtainFailedException[Can't lock shard [joint_user_summary_v1_all][1], timed out after 5000ms]; ]]]\nDec 12 12:53:42 cubitsearch-4 elasticsearch: [joint_user_summary_v1_all][[joint_user_summary_v1_all][1]] BroadcastShardOperationFailedException[operation indices:monitor/stats failed]; nested: IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]];\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.onShardOperation(TransportBroadcastByNodeAction.java:405)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:382)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:371)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.doRun(MessageChannelHandler.java:299)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at java.lang.Thread.run(Thread.java:745)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: Caused by: [joint_user_summary_v1_all][[joint_user_summary_v1_all][1]] IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]]\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.index.shard.IndexShard.readAllowed(IndexShard.java:974)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.index.shard.IndexShard.acquireSearcher(IndexShard.java:808)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.index.shard.IndexShard.docStats(IndexShard.java:628)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.admin.indices.stats.CommonStats.<init>(CommonStats.java:131)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at 
org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction.shardOperation(TransportIndicesStatsAction.java:165)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction.shardOperation(TransportIndicesStatsAction.java:47)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.onShardOperation(TransportBroadcastByNodeAction.java:401)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011... 7 more\nDec 12 12:53:42 cubitsearch-4 elasticsearch: [2015-12-12 12:53:42,610][DEBUG][action.admin.indices.stats] [cubitsearch-4] [indices:monitor/stats] failed to execute operation for shard [[joint_user_summary_v1_all][0], node[3mL6ye16Ru2o05yVzjoUuQ], [R], v[8605], s[INITIALIZING], a[id=IQeeXvCCTweMQpf2O-ncWw], unassigned_info[[reason=ALLOCATION_FAILED], at[2015-12-12T12:43:05.921Z], details[failed to create shard, failure ElasticsearchException[failed to create shard]; nested: LockObtainFailedException[Can't lock shard [joint_user_summary_v1_all][0], timed out after 5000ms]; ]]]\nDec 12 12:53:42 cubitsearch-4 elasticsearch: [joint_user_summary_v1_all][[joint_user_summary_v1_all][0]] BroadcastShardOperationFailedException[operation indices:monitor/stats failed]; nested: IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]];\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.onShardOperation(TransportBroadcastByNodeAction.java:405)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:382)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:371)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.doRun(MessageChannelHandler.java:299)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at java.lang.Thread.run(Thread.java:745)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: Caused by: [joint_user_summary_v1_all][[joint_user_summary_v1_all][0]] IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]]\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.index.shard.IndexShard.readAllowed(IndexShard.java:974)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.index.shard.IndexShard.acquireSearcher(IndexShard.java:808)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.index.shard.IndexShard.docStats(IndexShard.java:628)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: 
#011at org.elasticsearch.action.admin.indices.stats.CommonStats.<init>(CommonStats.java:131)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction.shardOperation(TransportIndicesStatsAction.java:165)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction.shardOperation(TransportIndicesStatsAction.java:47)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.onShardOperation(TransportBroadcastByNodeAction.java:401)\nDec 12 12:53:42 cubitsearch-4 elasticsearch: #011... 7 more\nDec 12 12:53:44 cubitsearch-4 elasticsearch: [2015-12-12 12:53:44,514][DEBUG][action.admin.indices.stats] [cubitsearch-4] [indices:monitor/stats] failed to execute operation for shard [[joint_user_summary_v1_all][1], node[3mL6ye16Ru2o05yVzjoUuQ], [R], v[8697], s[INITIALIZING], a[id=KIkQeU0rTCSzZtFPl0YtlQ], unassigned_info[[reason=ALLOCATION_FAILED], at[2015-12-12T12:42:23.164Z], details[failed to create shard, failure ElasticsearchException[failed to create shard]; nested: LockObtainFailedException[Can't lock shard [joint_user_summary_v1_all][1], timed out after 5000ms]; ]]]\nDec 12 12:53:44 cubitsearch-4 elasticsearch: [joint_user_summary_v1_all][[joint_user_summary_v1_all][1]] BroadcastShardOperationFailedException[operation indices:monitor/stats failed]; nested: IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]];\nDec 12 12:53:44 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.onShardOperation(TransportBroadcastByNodeAction.java:405)\nDec 12 12:53:44 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:382)\nDec 12 12:53:44 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:371)\nDec 12 12:53:44 cubitsearch-4 elasticsearch: #011at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.doRun(MessageChannelHandler.java:299)\nDec 12 12:53:44 cubitsearch-4 elasticsearch: #011at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\nDec 12 12:53:44 cubitsearch-4 elasticsearch: #011at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\nDec 12 12:53:44 cubitsearch-4 elasticsearch: #011at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\nDec 12 12:53:44 cubitsearch-4 elasticsearch: #011at java.lang.Thread.run(Thread.java:745)\nDec 12 12:53:44 cubitsearch-4 elasticsearch: Caused by: [joint_user_summary_v1_all][[joint_user_summary_v1_all][1]] IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]]\nDec 12 12:53:44 cubitsearch-4 elasticsearch: #011at org.elasticsearch.index.shard.IndexShard.readAllowed(IndexShard.java:974)\nDec 12 12:53:44 cubitsearch-4 elasticsearch: #011at org.elasticsearch.index.shard.IndexShard.acquireSearcher(IndexShard.java:808)\nDec 12 12:53:44 
cubitsearch-4 elasticsearch: #011at org.elasticsearch.index.shard.IndexShard.docStats(IndexShard.java:628)\nDec 12 12:53:44 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.admin.indices.stats.CommonStats.<init>(CommonStats.java:131)\nDec 12 12:53:44 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction.shardOperation(TransportIndicesStatsAction.java:165)\nDec 12 12:53:44 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction.shardOperation(TransportIndicesStatsAction.java:47)\nDec 12 12:53:44 cubitsearch-4 elasticsearch: #011at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.onShardOperation(TransportBroadcastByNodeAction.java:401)\nDec 12 12:53:44 cubitsearch-4 elasticsearch: #011... 7 more\nDec 12 12:53:44 cubitsearch-4 elasticsearch: [2015-12-12 12:53:44,515][DEBUG][action.admin.indices.stats] [cubitsearch-4] [indices:monitor/stats] failed to execute operation for shard [[joint_user_summary_v1_all][0], node[3mL6ye16Ru2o05yVzjoUuQ], [R], v[8605], s[INITIALIZING], a[id=IQeeXvCCTweMQpf2O-ncWw], unassigned_info[[reason=ALLOCATION_FAILED], at[2015-12-12T12:43:05.921Z], details[failed to create shard, failure ElasticsearchException[failed to create shard]; nested: LockObtainFailedException[Can't lock shard [joint_user_summary_v1_all][0], timed out after 5000ms]; ]]]\n```\n\nAnd here's the output of `curl -XGET -s localhost:9200/_nodes/hot_threads?threads=10000`:\n\n[hot_threads.txt](https://github.com/elastic/elasticsearch/files/60134/hot_threads.txt)\n", "created_at": "2015-12-12T13:10:03Z" }, { "body": "Ho-hum. It fixed itself...\n", "created_at": "2015-12-12T18:05:50Z" }, { "body": "@krisb78 these log messages are just saying that you are trying to search on these shards before they have recovered.\n", "created_at": "2015-12-14T13:32:05Z" }, { "body": "@clintongormley -- I recently started seeing those messages again: here is the thread dump:\n\nhttps://gist.github.com/zombiepig01/41bad73814d27fe3a018\n", "created_at": "2015-12-14T23:42:08Z" }, { "body": "I have a large index, about 150GB; this index is stuck when I restart ES (running 1 node). This is the log:\n\n```\n[logstash-2015.12.15][[logstash-2015.12.15][4]] BroadcastShardOperationFailedException[operation indices:monitor/stats failed]; nested: IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]];\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.onShardOperation(TransportBroadcastByNodeAction.java:405)\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:382)\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:371)\n at org.elasticsearch.transport.TransportService$4.doRun(TransportService.java:350)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: 
[logstash-2015.12.15][[logstash-2015.12.15][4]] IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]]\n at org.elasticsearch.index.shard.IndexShard.readAllowed(IndexShard.java:974)\n at org.elasticsearch.index.shard.IndexShard.acquireSearcher(IndexShard.java:808)\n at org.elasticsearch.index.shard.IndexShard.docStats(IndexShard.java:628)\n at org.elasticsearch.action.admin.indices.stats.CommonStats.<init>(CommonStats.java:131)\n at org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction.shardOperation(TransportIndicesStatsAction.java:165)\n at org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction.shardOperation(TransportIndicesStatsAction.java:47)\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.onShardOperation(TransportBroadcastByNodeAction.java:401)\n ... 7 more\n[2015-12-16 09:57:24,397][DEBUG][action.admin.indices.stats] [log03] [indices:monitor/stats] failed to execute operation for shard [[logstash-2015.12.15][3], node[gEc7JgdmSVyu-EzzdNn7bA], [P], v[7], s[INITIALIZING], a[id=Q8evo1x3Rgy0Ytpd7zpLGQ], unassigned_info[[reason=CLUSTER_RECOVERED], at[2015-12-16T02:54:37.577Z]]]\n[logstash-2015.12.15][[logstash-2015.12.15][3]] BroadcastShardOperationFailedException[operation indices:monitor/stats failed]; nested: IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]];\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.onShardOperation(TransportBroadcastByNodeAction.java:405)\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:382)\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:371)\n at org.elasticsearch.transport.TransportService$4.doRun(TransportService.java:350)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: [logstash-2015.12.15][[logstash-2015.12.15][3]] IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]]\n at org.elasticsearch.index.shard.IndexShard.readAllowed(IndexShard.java:974)\n at org.elasticsearch.index.shard.IndexShard.acquireSearcher(IndexShard.java:808)\n at org.elasticsearch.index.shard.IndexShard.docStats(IndexShard.java:628)\n at org.elasticsearch.action.admin.indices.stats.CommonStats.<init>(CommonStats.java:131)\n at org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction.shardOperation(TransportIndicesStatsAction.java:165)\n at org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction.shardOperation(TransportIndicesStatsAction.java:47)\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.onShardOperation(TransportBroadcastByNodeAction.java:401)\n ... 
7 more\n[2015-12-16 09:57:24,398][DEBUG][action.admin.indices.stats] [log03] [indices:monitor/stats] failed to execute operation for shard [[logstash-2015.12.15][1], node[gEc7JgdmSVyu-EzzdNn7bA], [P], v[7], s[INITIALIZING], a[id=Av2uA3StSOW_8S72m7EqOw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2015-12-16T02:54:37.577Z]]]\n[logstash-2015.12.15][[logstash-2015.12.15][1]] BroadcastShardOperationFailedException[operation indices:monitor/stats failed]; nested: IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]];\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.onShardOperation(TransportBroadcastByNodeAction.java:405)\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:382)\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:371)\n at org.elasticsearch.transport.TransportService$4.doRun(TransportService.java:350)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: [logstash-2015.12.15][[logstash-2015.12.15][1]] IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]]\n at org.elasticsearch.index.shard.IndexShard.readAllowed(IndexShard.java:974)\n at org.elasticsearch.index.shard.IndexShard.acquireSearcher(IndexShard.java:808)\n at org.elasticsearch.index.shard.IndexShard.docStats(IndexShard.java:628)\n at org.elasticsearch.action.admin.indices.stats.CommonStats.<init>(CommonStats.java:131)\n at org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction.shardOperation(TransportIndicesStatsAction.java:165)\n at org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction.shardOperation(TransportIndicesStatsAction.java:47)\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.onShardOperation(TransportBroadcastByNodeAction.java:401)\n ... 
7 more\n[2015-12-16 09:57:24,398][DEBUG][action.admin.indices.stats] [log03] [indices:monitor/stats] failed to execute operation for shard [[logstash-2015.12.15][2], node[gEc7JgdmSVyu-EzzdNn7bA], [P], v[7], s[INITIALIZING], a[id=3s_-6jJ7Q8WxfAQsFZBJMw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2015-12-16T02:54:37.577Z]]]\n[logstash-2015.12.15][[logstash-2015.12.15][2]] BroadcastShardOperationFailedException[operation indices:monitor/stats failed]; nested: IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]];\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.onShardOperation(TransportBroadcastByNodeAction.java:405)\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:382)\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:371)\n at org.elasticsearch.transport.TransportService$4.doRun(TransportService.java:350)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: [logstash-2015.12.15][[logstash-2015.12.15][2]] IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]]\n at org.elasticsearch.index.shard.IndexShard.readAllowed(IndexShard.java:974)\n at org.elasticsearch.index.shard.IndexShard.acquireSearcher(IndexShard.java:808)\n at org.elasticsearch.index.shard.IndexShard.docStats(IndexShard.java:628)\n at org.elasticsearch.action.admin.indices.stats.CommonStats.<init>(CommonStats.java:131)\n at org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction.shardOperation(TransportIndicesStatsAction.java:165)\n at org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction.shardOperation(TransportIndicesStatsAction.java:47)\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.onShardOperation(TransportBroadcastByNodeAction.java:401)\n ... 
7 more\n[2015-12-16 09:57:24,399][DEBUG][action.admin.indices.stats] [log03] [indices:monitor/stats] failed to execute operation for shard [[logstash-2015.12.15][0], node[gEc7JgdmSVyu-EzzdNn7bA], [P], v[7], s[INITIALIZING], a[id=H4oJgM5dR0GS73NiK7Glsg], unassigned_info[[reason=CLUSTER_RECOVERED], at[2015-12-16T02:54:37.577Z]]]\n[logstash-2015.12.15][[logstash-2015.12.15][0]] BroadcastShardOperationFailedException[operation indices:monitor/stats failed]; nested: IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]];\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.onShardOperation(TransportBroadcastByNodeAction.java:405)\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:382)\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:371)\n at org.elasticsearch.transport.TransportService$4.doRun(TransportService.java:350)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: [logstash-2015.12.15][[logstash-2015.12.15][0]] IllegalIndexShardStateException[CurrentState[RECOVERING] operations only allowed when shard state is one of [POST_RECOVERY, STARTED, RELOCATED]]\n```\n\nis it a bug?\n", "created_at": "2015-12-16T03:03:29Z" }, { "body": "@jindov see https://github.com/elastic/elasticsearch/issues/12011#issuecomment-164437888\n", "created_at": "2015-12-16T11:36:04Z" }, { "body": "The original issue here `LockObtainFailedException: Can't lock shard` seems to have been resolved in recent releases. Closing for now. Please reopen if you're seeing the same thing on 2.1 or higher.\n", "created_at": "2016-01-18T10:43:43Z" }, { "body": "@clintongormley do you have any clue what was the cause of the issue and what could have fixed it in > 2.1 ?\n", "created_at": "2016-03-25T14:18:44Z" }, { "body": "@kovrus we had some deadlocks fixed - those were preventing the shard lock from being freed. Do see this with a version >2.1?\n", "created_at": "2016-03-25T14:57:49Z" }, { "body": "I'm hitting this old chestnut again. 
I upgraded from 2.3.0 to 2.3.3 and all indices are fine (green) except the largest one, which is still yellow.\n\nShards are spending considerable time in the VERIFY_INDEX stage, but then they fail to initialise:\n\nOn the node that tries to initialise a shard, I'm seeing:\n\n```\nJun 2 12:07:00 cubitsearch-5 elasticsearch: [2016-06-02 12:07:00,089][WARN ][indices.cluster ] [cubitsearch-5] [[joint_user_summary_v1_all][2]] marking and sending shard failed due to [failed to create shard]\nJun 2 12:07:00 cubitsearch-5 elasticsearch: [joint_user_summary_v1_all][[joint_user_summary_v1_all][2]] ElasticsearchException[failed to create shard]; nested: LockObtainFailedException[Can't lock shard [joint_user_summary_v1_all][2], timed out after 5000ms];\nJun 2 12:07:00 cubitsearch-5 elasticsearch: #011at org.elasticsearch.index.IndexService.createShard(IndexService.java:389)\nJun 2 12:07:00 cubitsearch-5 elasticsearch: #011at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:601)\nJun 2 12:07:00 cubitsearch-5 elasticsearch: #011at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:501)\nJun 2 12:07:00 cubitsearch-5 elasticsearch: #011at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:166)\nJun 2 12:07:00 cubitsearch-5 elasticsearch: #011at org.elasticsearch.cluster.service.InternalClusterService.runTasksForExecutor(InternalClusterService.java:610)\nJun 2 12:07:00 cubitsearch-5 elasticsearch: #011at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:772)\nJun 2 12:07:00 cubitsearch-5 elasticsearch: #011at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:231)\nJun 2 12:07:00 cubitsearch-5 elasticsearch: #011at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:194)\nJun 2 12:07:00 cubitsearch-5 elasticsearch: #011at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\nJun 2 12:07:00 cubitsearch-5 elasticsearch: #011at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\nJun 2 12:07:00 cubitsearch-5 elasticsearch: #011at java.lang.Thread.run(Thread.java:745)\nJun 2 12:07:00 cubitsearch-5 elasticsearch: Caused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [joint_user_summary_v1_all][2], timed out after 5000ms\nJun 2 12:07:00 cubitsearch-5 elasticsearch: #011at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:609)\nJun 2 12:07:00 cubitsearch-5 elasticsearch: #011at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:537)\nJun 2 12:07:00 cubitsearch-5 elasticsearch: #011at org.elasticsearch.index.IndexService.createShard(IndexService.java:306)\nJun 2 12:07:00 cubitsearch-5 elasticsearch: #011... 
10 more\nJun 2 12:07:05 cubitsearch-5 elasticsearch: [2016-06-02 12:07:05,223][WARN ][indices.cluster ] [cubitsearch-5] [[joint_user_summary_v1_all][4]] marking and sending shard failed due to [failed to create shard]\nJun 2 12:07:05 cubitsearch-5 elasticsearch: [joint_user_summary_v1_all][[joint_user_summary_v1_all][4]] ElasticsearchException[failed to create shard]; nested: LockObtainFailedException[Can't lock shard [joint_user_summary_v1_all][4], timed out after 5000ms];\nJun 2 12:07:05 cubitsearch-5 elasticsearch: #011at org.elasticsearch.index.IndexService.createShard(IndexService.java:389)\nJun 2 12:07:05 cubitsearch-5 elasticsearch: #011at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:601)\nJun 2 12:07:05 cubitsearch-5 elasticsearch: #011at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:501)\nJun 2 12:07:05 cubitsearch-5 elasticsearch: #011at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:166)\nJun 2 12:07:05 cubitsearch-5 elasticsearch: #011at org.elasticsearch.cluster.service.InternalClusterService.runTasksForExecutor(InternalClusterService.java:610)\nJun 2 12:07:05 cubitsearch-5 elasticsearch: #011at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:772)\nJun 2 12:07:05 cubitsearch-5 elasticsearch: #011at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:231)\nJun 2 12:07:05 cubitsearch-5 elasticsearch: #011at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:194)\nJun 2 12:07:05 cubitsearch-5 elasticsearch: #011at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\nJun 2 12:07:05 cubitsearch-5 elasticsearch: #011at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\nJun 2 12:07:05 cubitsearch-5 elasticsearch: #011at java.lang.Thread.run(Thread.java:745)\nJun 2 12:07:05 cubitsearch-5 elasticsearch: Caused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [joint_user_summary_v1_all][4], timed out after 5000ms\nJun 2 12:07:05 cubitsearch-5 elasticsearch: #011at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:609)\nJun 2 12:07:05 cubitsearch-5 elasticsearch: #011at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:537)\nJun 2 12:07:05 cubitsearch-5 elasticsearch: #011at org.elasticsearch.index.IndexService.createShard(IndexService.java:306)\nJun 2 12:07:05 cubitsearch-5 elasticsearch: #011... 
10 more\nJun 2 12:07:10 cubitsearch-5 elasticsearch: [2016-06-02 12:07:10,318][WARN ][indices.cluster ] [cubitsearch-5] [[joint_user_summary_v1_all][1]] marking and sending shard failed due to [failed to create shard]\nJun 2 12:07:10 cubitsearch-5 elasticsearch: [joint_user_summary_v1_all][[joint_user_summary_v1_all][1]] ElasticsearchException[failed to create shard]; nested: LockObtainFailedException[Can't lock shard [joint_user_summary_v1_all][1], timed out after 5000ms];\nJun 2 12:07:10 cubitsearch-5 elasticsearch: #011at org.elasticsearch.index.IndexService.createShard(IndexService.java:389)\nJun 2 12:07:10 cubitsearch-5 elasticsearch: #011at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:601)\nJun 2 12:07:10 cubitsearch-5 elasticsearch: #011at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:501)\nJun 2 12:07:10 cubitsearch-5 elasticsearch: #011at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:166)\nJun 2 12:07:10 cubitsearch-5 elasticsearch: #011at org.elasticsearch.cluster.service.InternalClusterService.runTasksForExecutor(InternalClusterService.java:610)\nJun 2 12:07:10 cubitsearch-5 elasticsearch: #011at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:772)\nJun 2 12:07:10 cubitsearch-5 elasticsearch: #011at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:231)\nJun 2 12:07:10 cubitsearch-5 elasticsearch: #011at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:194)\nJun 2 12:07:10 cubitsearch-5 elasticsearch: #011at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\nJun 2 12:07:10 cubitsearch-5 elasticsearch: #011at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\nJun 2 12:07:10 cubitsearch-5 elasticsearch: #011at java.lang.Thread.run(Thread.java:745)\nJun 2 12:07:10 cubitsearch-5 elasticsearch: Caused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [joint_user_summary_v1_all][1], timed out after 5000ms\nJun 2 12:07:10 cubitsearch-5 elasticsearch: #011at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:609)\nJun 2 12:07:10 cubitsearch-5 elasticsearch: #011at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:537)\nJun 2 12:07:10 cubitsearch-5 elasticsearch: #011at org.elasticsearch.index.IndexService.createShard(IndexService.java:306)\nJun 2 12:07:10 cubitsearch-5 elasticsearch: #011... 
10 more\nJun 2 12:07:15 cubitsearch-5 elasticsearch: [2016-06-02 12:07:15,421][WARN ][indices.cluster ] [cubitsearch-5] [[joint_user_summary_v1_all][1]] marking and sending shard failed due to [failed to create shard]\nJun 2 12:07:15 cubitsearch-5 elasticsearch: [joint_user_summary_v1_all][[joint_user_summary_v1_all][1]] ElasticsearchException[failed to create shard]; nested: LockObtainFailedException[Can't lock shard [joint_user_summary_v1_all][1], timed out after 5000ms];\nJun 2 12:07:15 cubitsearch-5 elasticsearch: #011at org.elasticsearch.index.IndexService.createShard(IndexService.java:389)\nJun 2 12:07:15 cubitsearch-5 elasticsearch: #011at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:601)\nJun 2 12:07:15 cubitsearch-5 elasticsearch: #011at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:501)\nJun 2 12:07:15 cubitsearch-5 elasticsearch: #011at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:166)\nJun 2 12:07:15 cubitsearch-5 elasticsearch: #011at org.elasticsearch.cluster.service.InternalClusterService.runTasksForExecutor(InternalClusterService.java:610)\nJun 2 12:07:15 cubitsearch-5 elasticsearch: #011at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:772)\nJun 2 12:07:15 cubitsearch-5 elasticsearch: #011at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:231)\nJun 2 12:07:15 cubitsearch-5 elasticsearch: #011at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:194)\nJun 2 12:07:15 cubitsearch-5 elasticsearch: #011at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\nJun 2 12:07:15 cubitsearch-5 elasticsearch: #011at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\nJun 2 12:07:15 cubitsearch-5 elasticsearch: #011at java.lang.Thread.run(Thread.java:745)\nJun 2 12:07:15 cubitsearch-5 elasticsearch: Caused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [joint_user_summary_v1_all][1], timed out after 5000ms\nJun 2 12:07:15 cubitsearch-5 elasticsearch: #011at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:609)\nJun 2 12:07:15 cubitsearch-5 elasticsearch: #011at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:537)\nJun 2 12:07:15 cubitsearch-5 elasticsearch: #011at org.elasticsearch.index.IndexService.createShard(IndexService.java:306)\nJun 2 12:07:15 cubitsearch-5 elasticsearch: #011... 
10 more\nJun 2 12:07:20 cubitsearch-5 elasticsearch: [2016-06-02 12:07:20,830][WARN ][indices.cluster ] [cubitsearch-5] [[joint_user_summary_v1_all][2]] marking and sending shard failed due to [failed to create shard]\nJun 2 12:07:20 cubitsearch-5 elasticsearch: [joint_user_summary_v1_all][[joint_user_summary_v1_all][2]] ElasticsearchException[failed to create shard]; nested: LockObtainFailedException[Can't lock shard [joint_user_summary_v1_all][2], timed out after 5000ms];\nJun 2 12:07:20 cubitsearch-5 elasticsearch: #011at org.elasticsearch.index.IndexService.createShard(IndexService.java:389)\nJun 2 12:07:20 cubitsearch-5 elasticsearch: #011at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:601)\nJun 2 12:07:20 cubitsearch-5 elasticsearch: #011at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:501)\nJun 2 12:07:20 cubitsearch-5 elasticsearch: #011at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:166)\nJun 2 12:07:20 cubitsearch-5 elasticsearch: #011at org.elasticsearch.cluster.service.InternalClusterService.runTasksForExecutor(InternalClusterService.java:610)\nJun 2 12:07:20 cubitsearch-5 elasticsearch: #011at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:772)\nJun 2 12:07:20 cubitsearch-5 elasticsearch: #011at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:231)\nJun 2 12:07:20 cubitsearch-5 elasticsearch: #011at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:194)\nJun 2 12:07:20 cubitsearch-5 elasticsearch: #011at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\nJun 2 12:07:20 cubitsearch-5 elasticsearch: #011at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\nJun 2 12:07:20 cubitsearch-5 elasticsearch: #011at java.lang.Thread.run(Thread.java:745)\nJun 2 12:07:20 cubitsearch-5 elasticsearch: Caused by: org.apache.lucene.store.LockObtainFailedException: Can't lock shard [joint_user_summary_v1_all][2], timed out after 5000ms\nJun 2 12:07:20 cubitsearch-5 elasticsearch: #011at org.elasticsearch.env.NodeEnvironment$InternalShardLock.acquire(NodeEnvironment.java:609)\nJun 2 12:07:20 cubitsearch-5 elasticsearch: #011at org.elasticsearch.env.NodeEnvironment.shardLock(NodeEnvironment.java:537)\nJun 2 12:07:20 cubitsearch-5 elasticsearch: #011at org.elasticsearch.index.IndexService.createShard(IndexService.java:306)\nJun 2 12:07:20 cubitsearch-5 elasticsearch: #011... 
10 more\n```\n\nThe node that \"sends\" the shard to cubitsearch-5 says:\n\n```\nJun 2 11:28:28 cubitsearch-2 elasticsearch: [2016-06-02 11:28:28,961][WARN ][transport ] [cubitsearch-2] Received response for a request that has timed out, sent [2214039ms] ago, timed out [1314039ms] ago, action [internal:index/shard/recovery/prepare_translog], node [{cubitsearch-5}{Dxpr53kUS9qN41nLnLUOXA}{10.0.1.239}{10.0.1.239:9300}{max_local_storage_nodes=1, master=false}], id [479687]\nJun 2 11:34:15 cubitsearch-2 elasticsearch: [2016-06-02 11:34:15,131][WARN ][transport ] [cubitsearch-2] Received response for a request that has timed out, sent [1849861ms] ago, timed out [949861ms] ago, action [internal:index/shard/recovery/prepare_translog], node [{cubitsearch-5}{Dxpr53kUS9qN41nLnLUOXA}{10.0.1.239}{10.0.1.239:9300}{max_local_storage_nodes=1, master=false}], id [482623]\nJun 2 11:47:14 cubitsearch-2 elasticsearch: [2016-06-02 11:47:14,682][WARN ][transport ] [cubitsearch-2] Received response for a request that has timed out, sent [1537976ms] ago, timed out [637976ms] ago, action [internal:index/shard/recovery/prepare_translog], node [{cubitsearch-5}{Dxpr53kUS9qN41nLnLUOXA}{10.0.1.239}{10.0.1.239:9300}{max_local_storage_nodes=1, master=false}], id [487423]\nJun 2 12:06:51 cubitsearch-2 elasticsearch: [2016-06-02 12:06:51,344][WARN ][transport ] [cubitsearch-2] Received response for a request that has timed out, sent [1807416ms] ago, timed out [907416ms] ago, action [internal:index/shard/recovery/prepare_translog], node [{cubitsearch-5}{Dxpr53kUS9qN41nLnLUOXA}{10.0.1.239}{10.0.1.239:9300}{max_local_storage_nodes=1, master=false}], id [507910]\nJun 2 12:09:32 cubitsearch-2 elasticsearch: [2016-06-02 12:09:32,897][WARN ][transport ] [cubitsearch-2] Received response for a request that has timed out, sent [1975624ms] ago, timed out [1075624ms] ago, action [internal:index/shard/recovery/prepare_translog], node [{cubitsearch-5}{Dxpr53kUS9qN41nLnLUOXA}{10.0.1.239}{10.0.1.239:9300}{max_local_storage_nodes=1, master=false}], id [507493]\n```\n\nI tried restarting the nodes a couple of times already and managed to get some more shards to initialise, but some of them are still missing. \n", "created_at": "2016-06-02T12:15:43Z" }, { "body": "@krisb78 what operations are happening on your cluster at the same time? esp, any scrolls?\n", "created_at": "2016-06-02T13:11:21Z" }, { "body": "delete index and recreate?\n", "created_at": "2016-06-02T13:11:37Z" }, { "body": "Already check the scrolls, nothing like that is happening.\n\nWhat seems to have helped a bit was deleting this index manually (i.e., removing the index directory) before restarting a node. I managed to get shards to initialise on 3 nodes by doing that, but the remaining 2 won't budge.\n\nI'm holding off from deleting & recreating the index for now - I have done it to fix this issue before, but I wouldn't want to have to do that on every upgrade...\n", "created_at": "2016-06-02T13:27:32Z" }, { "body": "@krisb78 is it only one node that is causing problems? if so you can try restarting it. 
Can I the output of `localhost:9200/_cat/recovery`\n", "created_at": "2016-06-02T13:30:26Z" }, { "body": "It was all nodes, really...\n\nHere's what _cat/recovery is saying about this problematic index:\n\n```\njoint_user_summary_v1_all 0 1861391 replica done 10.0.1.239 10.0.1.237 n/a n/a 147 100.0% 17955289909 100.0% 147 17955289909 0 100.0% 0\njoint_user_summary_v1_all 0 2303300 replica done 10.0.1.239 10.0.1.235 n/a n/a 147 100.0% 17955289909 100.0% 147 17955289909 0 100.0% 0\njoint_user_summary_v1_all 1 1931733 replica done 10.0.1.239 10.0.1.236 n/a n/a 162 100.0% 17654494329 100.0% 162 17654494329 0 100.0% 0\njoint_user_summary_v1_all 1 1459125 replica done 10.0.1.239 10.0.1.237 n/a n/a 162 100.0% 17654494329 100.0% 162 17654494329 0 100.0% 0\njoint_user_summary_v1_all 1 856778 replica done 10.0.1.239 10.0.1.235 n/a n/a 0 0.0% 0 0.0% 0 0 0 100.0% 0\njoint_user_summary_v1_all 2 463469 replica done 10.0.1.239 10.0.1.236 n/a n/a 0 0.0% 0 0.0% 0 0 0 100.0% 0\njoint_user_summary_v1_all 2 472860 replica done 10.0.1.239 10.0.1.237 n/a n/a 0 0.0% 0 0.0% 0 0 0 100.0% 0\njoint_user_summary_v1_all 2 1059848 replica done 10.0.1.239 10.0.1.235 n/a n/a 124 100.0% 13853113660 100.0% 124 13853113660 0 100.0% 0\njoint_user_summary_v1_all 3 1717997 replica done 10.0.1.239 10.0.1.236 n/a n/a 128 100.0% 14859107452 100.0% 128 14859107452 0 100.0% 0\njoint_user_summary_v1_all 3 900335 replica done 10.0.1.239 10.0.1.237 n/a n/a 128 100.0% 14859107452 100.0% 128 14859107452 0 100.0% 0\njoint_user_summary_v1_all 3 183371 replica verify_index 10.0.1.236 10.0.1.239 n/a n/a 0 0.0% 0 0.0% 0 0 0 100.0% 0\njoint_user_summary_v1_all 3 1272129 replica done 10.0.1.239 10.0.1.235 n/a n/a 128 100.0% 14859107452 100.0% 128 14859107452 0 100.0% 0\njoint_user_summary_v1_all 4 1611829 replica done 10.0.1.239 10.0.1.236 n/a n/a 125 100.0% 16055840146 100.0% 125 16055840146 0 100.0% 0\njoint_user_summary_v1_all 4 1250961 replica done 10.0.1.239 10.0.1.237 n/a n/a 125 100.0% 16055840146 100.0% 125 16055840146 0 100.0% 0\njoint_user_summary_v1_all 4 708638 replica verify_index 10.0.1.236 10.0.1.238 n/a n/a 0 0.0% 0 0.0% 0 0 0 100.0% 0\njoint_user_summary_v1_all 4 708630 replica verify_index 10.0.1.236 10.0.1.239 n/a n/a 0 0.0% 0 0.0% 0 0 0 100.0% 0\njoint_user_summary_v1_all 4 753186 replica done 10.0.1.239 10.0.1.235 n/a n/a 0 0.0% 0 0.0% 0 0 0 100.0% 0\n```\n\nMy theory is that for some reason `prepare_translog` takes too long:\n\n```\nJun 2 13:37:09 cubitsearch-5 elasticsearch: Caused by: ReceiveTimeoutTransportException[[cubitsearch-5][10.0.1.239:9300][internal:index/shard/recovery/prepare_translog] request_id [709272] timed out after [900001ms]]\n```\n\nOnce this timeout happens, the `Can't lock shard` errors follow until the node is restarted.\n\nLooking at the code, this timeout is dictated by `indices.recovery.internal_action_timeout` or `indices.recovery.internal_action_long_timeout`.\n\nThe documentation doesn't say that these settings are customisable, but I tried to change them anyway now to see if it changes anything.\n", "created_at": "2016-06-02T14:08:19Z" }, { "body": "@bleskes does this ring any bell? I think we cancel both sides of recovery on such a failure?\n", "created_at": "2016-06-02T14:18:04Z" } ], "number": 12011, "title": "cluster stuck in loop \"failed to create shard\"" }
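The failure loop in the record above hinges on a per-shard lock that a lingering holder (here, a recovery that never finished) never releases, so every later attempt to create the shard times out after 5000ms with `LockObtainFailedException`. Below is a minimal, self-contained sketch of that pattern only — not the actual `NodeEnvironment` code: the class, the semaphore-per-shard map, and the shard id string are invented for illustration; only the shape of the error message mirrors the logs.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

// Illustrative only: a per-shard lock that times out if a previous holder never releases it.
public class ShardLockSketch {
    private final Map<String, Semaphore> locks = new ConcurrentHashMap<>();

    // Try to take the lock for a shard id, failing after the given timeout,
    // roughly analogous to the 5000ms lock timeout seen in the logs above.
    public AutoCloseable lock(String shardId, long timeoutMillis) throws Exception {
        Semaphore semaphore = locks.computeIfAbsent(shardId, k -> new Semaphore(1));
        if (semaphore.tryAcquire(timeoutMillis, TimeUnit.MILLISECONDS) == false) {
            throw new IllegalStateException(
                "Can't lock shard [" + shardId + "], timed out after " + timeoutMillis + "ms");
        }
        return semaphore::release; // caller must close() to release, or later create-shard attempts keep failing
    }

    public static void main(String[] args) throws Exception {
        ShardLockSketch locks = new ShardLockSketch();
        AutoCloseable first = locks.lock("[joint_user_summary_v1_all][1]", 5000);
        try {
            // A second acquisition while the first holder is still open fails with a timeout,
            // which is the shape of the "failed to create shard" loop reported in this issue.
            locks.lock("[joint_user_summary_v1_all][1]", 100);
        } catch (IllegalStateException expected) {
            System.out.println(expected.getMessage());
        } finally {
            first.close();
        }
    }
}
```

The only point of the sketch is that the second caller cannot succeed until the first holder releases, which is why a node restart (dropping the stale holder) temporarily clears the errors.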
{ "body": "If someone sets `index.shard.check_on_startup`, indexing start up time can be slow (by design, it diligently goes and checks all data). If for some reason the shard is closed in that time, the store ref is kept around and prevents a new shard copy to be allocated to this node via the shard level locks. This is especially tricky if the shard is close due to a cancelled recovery which may re-restart soon.\n\nThis PR adds a cancellable threads instance to each IndexShard and perform index checking underneath it, so it can be cancelled on close. This assumes that:\n\n1) Interrupting a checkIndex is safe.\n2) Interrupting a checkIndex will actually make it stop.\n\nRelates to #12011\n\nPS. We are also discussing not doing a full index check on peer recovery but rather just do a checksum check.\n", "number": 18839, "review_comments": [], "title": "index shard should be able to cancel check index on close." }
{ "commits": [ { "message": "index shard should be able to cancel check index on close." } ], "files": [ { "diff": "@@ -55,6 +55,7 @@\n import org.elasticsearch.common.unit.TimeValue;\n import org.elasticsearch.common.util.BigArrays;\n import org.elasticsearch.common.util.Callback;\n+import org.elasticsearch.common.util.CancellableThreads;\n import org.elasticsearch.common.util.concurrent.AbstractRunnable;\n import org.elasticsearch.common.util.concurrent.SuspendableRefContainer;\n import org.elasticsearch.index.Index;\n@@ -132,8 +133,8 @@\n import java.util.concurrent.atomic.AtomicBoolean;\n import java.util.concurrent.atomic.AtomicLong;\n import java.util.concurrent.atomic.AtomicReference;\n-import java.util.function.Consumer;\n import java.util.function.BiConsumer;\n+import java.util.function.Consumer;\n import java.util.stream.Collectors;\n \n public class IndexShard extends AbstractIndexShardComponent {\n@@ -159,6 +160,7 @@ public class IndexShard extends AbstractIndexShardComponent {\n private final TranslogConfig translogConfig;\n private final IndexEventListener indexEventListener;\n private final QueryCachingPolicy cachingPolicy;\n+ private final CancellableThreads cancellableThreads;\n \n \n /**\n@@ -266,6 +268,7 @@ public IndexShard(ShardRouting shardRouting, IndexSettings indexSettings, ShardP\n primaryTerm = indexSettings.getIndexMetaData().primaryTerm(shardId.id());\n refreshListeners = buildRefreshListeners();\n persistMetadata(shardRouting, null);\n+ cancellableThreads = new CancellableThreads();\n }\n \n public Store store() {\n@@ -589,7 +592,7 @@ public Engine.GetResult get(Engine.Get get) {\n */\n public void refresh(String source) {\n verifyNotClosed();\n- \n+\n if (canIndex()) {\n long bytes = getEngine().getIndexBufferRAMBytesUsed();\n writingBytes.addAndGet(bytes);\n@@ -846,6 +849,7 @@ public void close(String reason, boolean flushEngine) throws IOException {\n } finally { // playing safe here and close the engine even if the above succeeds - close can be called multiple times\n IOUtils.close(engine);\n }\n+ cancellableThreads.cancel(reason);\n }\n }\n }\n@@ -1284,7 +1288,7 @@ public void noopUpdate(String type) {\n private void checkIndex() throws IOException {\n if (store.tryIncRef()) {\n try {\n- doCheckIndex();\n+ cancellableThreads.executeIO(this::doCheckIndex);\n } finally {\n store.decRef();\n }", "filename": "core/src/main/java/org/elasticsearch/index/shard/IndexShard.java", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: 2.3.3\n\n**JVM version**: 1.8.0_91\n\n**OS version**: Ubuntu 16.04 LTS\n\n**Description of the problem including expected versus actual behavior**:\nError response during index mapping\n\n```\n{\n \"error\": {\n \"root_cause\": [\n {\n \"type\": \"null_pointer_exception\",\n \"reason\": null\n }\n ],\n \"type\": \"null_pointer_exception\",\n \"reason\": null\n },\n \"status\": 500\n}\n```\n\n**Steps to reproduce**:\n 1.\n\n```\nPUT /haystack/\n{\n \"settings\": {\n \"analysis\": {\n \"filter\": {\n \"haystack_edgengram\": {\n \"max_gram\": 15,\n \"type\": \"edgeNGram\",\n \"min_gram\": 2\n },\n \"haystack_ngram\": {\n \"max_gram\": 15,\n \"type\": \"nGram\",\n \"min_gram\": 3\n }\n },\n \"tokenizer\": {\n \"haystack_ngram_tokenizer\": {\n \"max_gram\": 15,\n \"type\": \"nGram\",\n \"min_gram\": 3\n },\n \"haystack_edgengram_tokenizer\": {\n \"max_gram\": 15,\n \"type\": \"edgeNGram\",\n \"side\": \"front\",\n \"min_gram\": 2\n }\n },\n \"analyzer\": {\n \"edgengram_analyzer\": {\n \"filter\": [\n \"haystack_edgengram\",\n \"lowercase\"\n ],\n \"type\": \"custom\",\n \"tokenizer\": \"standard\"\n },\n \"ngram_analyzer\": {\n \"filter\": [\n \"haystack_ngram\",\n \"lowercase\"\n ],\n \"type\": \"custom\",\n \"tokenizer\": \"standard\"\n }\n }\n }\n }\n}\n```\n\n 2.\n\n```\nPUT /haystack/_mapping/modelresult\n{\n \"properties\": {\n \"inside_id\": {\n \"type\": \"long\"\n },\n \"text\": {\n \"type\": \"string\",\n \"analyzer\": \"edgengram_analyzer\"\n },\n \"django_ct\": {\n \"include_in_all\": false,\n \"index\": \"not_analyzed\",\n \"type\": \"string\"\n },\n \"type_claster_id\": {\n \"type\": \"long\"\n },\n \"closed_at\": {\n \"type\": \"date\"\n },\n \"is_closed\": {\n \"type\": \"boolean\"\n },\n \"geo_type_region_id\": {\n \"type\": \"long\"\n },\n \"priority\": {\n \"type\": \"long\"\n },\n \"shingles\": {\n \"type\": \"string\",\n \"analyzer\": \"russian\"\n },\n \"board\": {\n \"type\": \"string\",\n \"analyzer\": \"russian\"\n },\n \"subscribers_names\": {\n \"type\": \"string\",\n \"analyzer\": \"russian\"\n },\n \"type\": {\n \"type\": \"long\"\n },\n \"claster_code\": {\n \"type\": \"long\"\n },\n \"views\": {\n \"type\": \"long\"\n },\n \"buy_seo_text\": {\n \"type\": \"string\",\n \"analyzer\": \"russian\"\n },\n \"updated_at\": {\n \"type\": \"date\"\n },\n \"geo_type_slug\": {\n \"type\": \"string\",\n \"analyzer\": \"russian\"\n },\n \"sell_seo_text\": {\n \"type\": \"string\",\n \"analyzer\": \"russian\"\n },\n \"geo_type_id\": {\n \"type\": \"long\"\n },\n \"name\": {\n \"type\": \"string\",\n \"analyzer\": \"edgengram_analyzer\"\n },\n \"attached\": {\n \"type\": \"boolean\"\n },\n \"ad_type\": {\n \"type\": \"string\",\n \"analyzer\": \"russian\"\n },\n \"filters_ids\": {\n \"type\": \"string\",\n \"analyzer\": \"russian\"\n },\n \"subject\": {\n \"type\": \"string\",\n \"analyzer\": \"russian\"\n },\n \"django_id\": {\n \"include_in_all\": false,\n \"index\": \"not_analyzed\",\n \"type\": \"string\"\n },\n \"is_approved\": {\n \"type\": \"boolean\"\n },\n \"author\": {\n \"type\": \"string\",\n \"analyzer\": \"russian\"\n },\n \"content\": {\n \"type\": \"string\",\n \"analyzer\": \"russian\"\n },\n \"views_today\": {\n \"type\": \"long\"\n },\n \"owner_id\": {\n \"type\": \"long\"\n },\n \"body\": {\n \"type\": \"string\",\n \"analyzer\": \"edgengram_analyzer\"\n },\n \"description\": {\n \"type\": \"string\",\n \"analyzer\": \"russian\"\n },\n \"parent\": {\n \"type\": \"long\"\n },\n \"key\": {\n \"type\": \"long\"\n },\n 
\"slug\": {\n \"type\": \"string\",\n \"analyzer\": \"russian\"\n },\n \"last_activity_at\": {\n \"type\": \"date\"\n },\n \"thread\": {\n \"type\": \"string\",\n \"analyzer\": \"russian\"\n },\n \"url\": {\n \"type\": \"string\",\n \"analyzer\": null\n },\n \"created_at\": {\n \"type\": \"date\"\n },\n \"title\": {\n \"type\": \"string\",\n \"analyzer\": \"edgengram_analyzer\"\n },\n \"geo_object\": {\n \"type\": \"long\"\n },\n \"attributed\": {\n \"type\": \"boolean\"\n },\n \"published\": {\n \"type\": \"boolean\"\n },\n \"primary_filter_id\": {\n \"type\": \"long\"\n },\n \"geo_type_region_slug\": {\n \"type\": \"string\",\n \"analyzer\": \"russian\"\n }\n }\n }\n```\n\n**Provide logs (if relevant)**:\n\n```\n[2016-06-09 17:34:49,914][INFO ][cluster.metadata ] [Micro] [haystack] creating index, cause [api], templates [], shards [5]/[1], mappings []\n[2016-06-09 17:34:50,650][INFO ][cluster.routing.allocation] [Micro] Cluster health status changed from [RED] to [YELLOW] (reason: [shards started [[haystack][4]] ...]).\n[2016-06-09 17:34:57,408][DEBUG][action.admin.indices.mapping.put] [Micro] failed to put mappings on indices [[haystack]], type [modelresult]\njava.lang.NullPointerException\n at org.elasticsearch.index.mapper.core.TypeParsers.parseAnalyzersAndTermVectors(TypeParsers.java:211)\n at org.elasticsearch.index.mapper.core.TypeParsers.parseTextField(TypeParsers.java:250)\n at org.elasticsearch.index.mapper.core.StringFieldMapper$TypeParser.parse(StringFieldMapper.java:161)\n at org.elasticsearch.index.mapper.object.ObjectMapper$TypeParser.parseProperties(ObjectMapper.java:305)\n at org.elasticsearch.index.mapper.object.ObjectMapper$TypeParser.parseObjectOrDocumentTypeProperties(ObjectMapper.java:218)\n at org.elasticsearch.index.mapper.object.RootObjectMapper$TypeParser.parse(RootObjectMapper.java:139)\n at org.elasticsearch.index.mapper.DocumentMapperParser.parse(DocumentMapperParser.java:118)\n at org.elasticsearch.index.mapper.DocumentMapperParser.parse(DocumentMapperParser.java:99)\n at org.elasticsearch.index.mapper.MapperService.parse(MapperService.java:508)\n at org.elasticsearch.cluster.metadata.MetaDataMappingService$PutMappingExecutor.applyRequest(MetaDataMappingService.java:257)\n at org.elasticsearch.cluster.metadata.MetaDataMappingService$PutMappingExecutor.execute(MetaDataMappingService.java:230)\n at org.elasticsearch.cluster.service.InternalClusterService.runTasksForExecutor(InternalClusterService.java:468)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:772)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:231)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:194)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\n[2016-06-09 17:34:57,409][WARN ][rest.suppressed ] /haystack/_mapping/modelresult Params: {index=haystack, type=modelresult}\njava.lang.NullPointerException\n at org.elasticsearch.index.mapper.core.TypeParsers.parseAnalyzersAndTermVectors(TypeParsers.java:211)\n at org.elasticsearch.index.mapper.core.TypeParsers.parseTextField(TypeParsers.java:250)\n at 
org.elasticsearch.index.mapper.core.StringFieldMapper$TypeParser.parse(StringFieldMapper.java:161)\n at org.elasticsearch.index.mapper.object.ObjectMapper$TypeParser.parseProperties(ObjectMapper.java:305)\n at org.elasticsearch.index.mapper.object.ObjectMapper$TypeParser.parseObjectOrDocumentTypeProperties(ObjectMapper.java:218)\n at org.elasticsearch.index.mapper.object.RootObjectMapper$TypeParser.parse(RootObjectMapper.java:139)\n at org.elasticsearch.index.mapper.DocumentMapperParser.parse(DocumentMapperParser.java:118)\n at org.elasticsearch.index.mapper.DocumentMapperParser.parse(DocumentMapperParser.java:99)\n at org.elasticsearch.index.mapper.MapperService.parse(MapperService.java:508)\n at org.elasticsearch.cluster.metadata.MetaDataMappingService$PutMappingExecutor.applyRequest(MetaDataMappingService.java:257)\n at org.elasticsearch.cluster.metadata.MetaDataMappingService$PutMappingExecutor.execute(MetaDataMappingService.java:230)\n at org.elasticsearch.cluster.service.InternalClusterService.runTasksForExecutor(InternalClusterService.java:468)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:772)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:231)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:194)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\n```\n", "comments": [ { "body": "> \"analyzer\": null\n\nThere is your problem! Send a string there....\n\nI'll add a better error message.\n", "created_at": "2016-06-09T18:32:57Z" }, { "body": "Thanks alot for quick response. I spent almost a day for this error.\n", "created_at": "2016-06-10T07:41:55Z" } ], "number": 18803, "title": "null_pointer_exception during index mapping" }
{ "body": "Improve the error message when the user sends something like:\n\n``` js\n\"analyzer\": null\n```\n\nCloses #18803\n", "number": 18809, "review_comments": [], "title": "Better error message when mapping configures null" }
{ "commits": [ { "message": "Better error message when mapping configures null\n\nCloses #18803" } ], "files": [ { "diff": "@@ -239,6 +239,13 @@ public static void parseField(FieldMapper.Builder builder, String name, Map<Stri\n Map.Entry<String, Object> entry = iterator.next();\n final String propName = entry.getKey();\n final Object propNode = entry.getValue();\n+ if (false == propName.equals(\"null_value\") && propNode == null) {\n+ /*\n+ * No properties *except* null_value are allowed to have null. So we catch it here and tell the user something useful rather\n+ * than send them a null pointer exception later.\n+ */\n+ throw new MapperParsingException(\"[\" + propName + \"] must not have a [null] value\");\n+ }\n if (propName.equals(\"store\")) {\n builder.store(parseStore(name, propNode.toString(), parserContext));\n iterator.remove();", "filename": "core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java", "status": "modified" }, { "diff": "@@ -302,4 +302,19 @@ public void testNullValue() throws IOException {\n assertEquals(1457654400000L, dvField.numericValue().longValue());\n assertFalse(dvField.fieldType().stored());\n }\n+\n+ public void testNullConfigValuesFail() throws MapperParsingException, IOException {\n+ String mapping = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"type\")\n+ .startObject(\"properties\")\n+ .startObject(\"field\")\n+ .field(\"type\", \"date\")\n+ .field(\"format\", (String) null)\n+ .endObject()\n+ .endObject()\n+ .endObject().endObject().string();\n+\n+ Exception e = expectThrows(MapperParsingException.class, () -> parser.parse(\"type\", new CompressedXContent(mapping)));\n+ assertEquals(\"[format] must not have a [null] value\", e.getMessage());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/core/DateFieldMapperTests.java", "status": "modified" }, { "diff": "@@ -35,6 +35,7 @@\n import org.elasticsearch.index.engine.Engine;\n import org.elasticsearch.index.mapper.DocumentMapper;\n import org.elasticsearch.index.mapper.DocumentMapperParser;\n+import org.elasticsearch.index.mapper.MapperParsingException;\n import org.elasticsearch.index.mapper.MapperService.MergeReason;\n import org.elasticsearch.index.mapper.core.TextFieldMapper.TextFieldType;\n import org.elasticsearch.index.mapper.ParsedDocument;\n@@ -458,4 +459,19 @@ public void testFrequencyFilter() throws IOException {\n assertThat(fieldType.fielddataMaxFrequency(), equalTo((double) Integer.MAX_VALUE));\n assertThat(fieldType.fielddataMinSegmentSize(), equalTo(1000));\n }\n+\n+ public void testNullConfigValuesFail() throws MapperParsingException, IOException {\n+ String mapping = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"type\")\n+ .startObject(\"properties\")\n+ .startObject(\"field\")\n+ .field(\"type\", \"text\")\n+ .field(\"analyzer\", (String) null)\n+ .endObject()\n+ .endObject()\n+ .endObject().endObject().string();\n+\n+ Exception e = expectThrows(MapperParsingException.class, () -> parser.parse(\"type\", new CompressedXContent(mapping)));\n+ assertEquals(\"[analyzer] must not have a [null] value\", e.getMessage());\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java", "status": "modified" } ] }
{ "body": "Lets say we have a repository with a snapshot `A` created in v2.3.3. Now, if we start ES 5.0 (master branch) and try to restore snapshot `A`, we get these exceptions:\n\n```\nRecovery failed from null into {Gomi}{S29Q6GFKQDC7m8DlfwAiwQ}{127.0.0.1}{127.0.0.1:9300}]; nested: IndexShardRecoveryException[failed recovery]; nested: IndexShardRestoreFailedException[restore failed]; nested: IndexShardRestoreFailedException[failed to restore snapshot [snap1]]; nested: NullPointerException[checksum must not be null]; ]\nRecoveryFailedException[[i1][2]: Recovery failed from null into {Gomi}{S29Q6GFKQDC7m8DlfwAiwQ}{127.0.0.1}{127.0.0.1:9300}]; nested: IndexShardRecoveryException[failed recovery]; nested: IndexShardRestoreFailedException[restore failed]; nested: IndexShardRestoreFailedException[failed to restore snapshot [snap1]]; nested: NullPointerException[checksum must not be null];\n at org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$133(IndexShard.java:1450)\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:392)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: [i1/o_88kBP1Q8OTmY0VJ-5quA][[i1][2]] IndexShardRecoveryException[failed recovery]; nested: IndexShardRestoreFailedException[restore failed]; nested: IndexShardRestoreFailedException[failed to restore snapshot [snap1]]; nested: NullPointerException[checksum must not be null];\n at org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:311)\n at org.elasticsearch.index.shard.StoreRecovery.recoverFromRepository(StoreRecovery.java:244)\n at org.elasticsearch.index.shard.IndexShard.restoreFromRepository(IndexShard.java:1149)\n at org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$133(IndexShard.java:1446)\n ... 4 more\nCaused by: [i1/o_88kBP1Q8OTmY0VJ-5quA][[i1][2]] IndexShardRestoreFailedException[restore failed]; nested: IndexShardRestoreFailedException[failed to restore snapshot [snap1]]; nested: NullPointerException[checksum must not be null];\n at org.elasticsearch.index.shard.StoreRecovery.restore(StoreRecovery.java:413)\n at org.elasticsearch.index.shard.StoreRecovery.lambda$recoverFromRepository$387(StoreRecovery.java:246)\n at org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:269)\n ... 7 more\nCaused by: [i1/o_88kBP1Q8OTmY0VJ-5quA][[i1][2]] IndexShardRestoreFailedException[failed to restore snapshot [snap1]]; nested: NullPointerException[checksum must not be null];\n at org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository.restore(BlobStoreIndexShardRepository.java:207)\n at org.elasticsearch.index.shard.StoreRecovery.restore(StoreRecovery.java:408)\n ... 
9 more\nCaused by: java.lang.NullPointerException: checksum must not be null\n at java.util.Objects.requireNonNull(Objects.java:228)\n at org.elasticsearch.index.store.StoreFileMetaData.<init>(StoreFileMetaData.java:64)\n at org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot$FileInfo.fromXContent(BlobStoreIndexShardSnapshot.java:316)\n at org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.fromXContent(BlobStoreIndexShardSnapshot.java:515)\n at org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.fromXContent(BlobStoreIndexShardSnapshot.java:45)\n at org.elasticsearch.repositories.blobstore.BlobStoreFormat.read(BlobStoreFormat.java:113)\n at org.elasticsearch.repositories.blobstore.ChecksumBlobStoreFormat.readBlob(ChecksumBlobStoreFormat.java:111)\n at org.elasticsearch.repositories.blobstore.BlobStoreFormat.read(BlobStoreFormat.java:89)\n at org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository$Context.loadSnapshot(BlobStoreIndexShardRepository.java:342)\n at org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository$RestoreContext.restore(BlobStoreIndexShardRepository.java:802)\n```\n\nThese exceptions are related to the `StoreFileMetaData` class throwing an exception if the checksum value is null. This is related to the change found here: https://github.com/elastic/elasticsearch/commit/5008694ba1a140c430a92c05ff84885de6a7d28a\n\nThe problem is, for snapshots created in 2.x, the segments_N files do not have checksums when stored in the repository, so when we try to restore a snapshot from 2.x into ES 5.0, we get this exception thrown. \n\nInterestingly, it does not prevent the index itself from being restored, as I am able to get and search against the index that was restored from the snapshot and retrieve documents.\n\nSteps to reproduce:\n1. Install ES 2.3.3\n2. In the elasticsearch.yml file, add the line: `path.repo: [\"/path/to/repository/dir\"]`\n3. Start ES 2.3.3\n4. Create a repository at the above location: `\ncurl -XPUT localhost:9200/_snapshot/my_repo -d '{\n\"type\": \"fs\",\n\"settings\": {\n\"location\": \"/path/to/repository/dir\",\n\"compress\": false\n}\n}'`\n5. Create an index and index documents:`\ncurl -XPOST localhost:9200/idx1/type1 -d '{ \"name\": \"ali\", \"sane\": \"absolutely not\" }'\ncurl -XPOST localhost:9200/idx1/type1 -d '{ \"name\": \"igor\", \"sane\": \"partially\" }'`\n6. Create a snapshot of the index:`\ncurl -XPUT \"localhost:9200/_snapshot/my_repo/snap1?wait_for_completion=true\" -d '{ \"indices\": [\"idx1\"] }'`\n7. Stop ES 2.3.3\n8. Install ES 5.0 from master branch\n9. In the elasticsearch.yml file, add the line: `path.repo: [\"/path/to/repository/dir\"]`\n10. Start ES 5.0\n11. Repeat step 4\n12. Try to restore the snapshot created earlier:`curl -XPOST \"localhost:9200/_snapshot/my_repo/snap1/_restore\"`\n", "comments": [ { "body": "@s1monw @imotov What do you think is the best approach for solving this? None of the 2.x snapshots will have checksums for the segments_N files.\n", "created_at": "2016-06-02T20:32:19Z" }, { "body": "Also, this only seems to happen when the number of documents in the index are few. I suspect when not all primary shards are populated with at least one document, though I need to dig further to confirm this.\n", "created_at": "2016-06-02T20:46:34Z" }, { "body": "And when this happens, we can not subsequently take a snapshot of the index in question again, getting \"primary shard not allocated\" errors. 
The reason is evident when looking at the cluster state for the index:\n\n```\n\"idx1\" : {\n \"shards\" : {\n \"2\" : [\n {\n \"state\" : \"UNASSIGNED\",\n \"primary\" : true,\n \"node\" : null,\n \"relocating_node\" : null,\n \"shard\" : 2,\n \"index\" : \"idx1\",\n \"restore_source\" : {\n \"repository\" : \"my_repo\",\n \"snapshot\" : \"snap1\",\n \"version\" : \"2.3.2\",\n \"index\" : \"idx1\"\n },\n \"unassigned_info\" : {\n \"reason\" : \"ALLOCATION_FAILED\",\n \"at\" : \"2016-06-02T20:45:10.822Z\",\n \"failed_attempts\" : 5,\n \"delayed\" : false,\n \"details\" : \"failed recovery, failure RecoveryFailedException[[idx1][2]: Recovery failed from null into {Aegis}{TQPQL-DTRaq_HhapThIQSg}{127.0.0.1}{127.0.0.1:9300}]; nested: IndexShardRecoveryException[failed recovery]; nested: IndexShardRestoreFailedException[restore failed]; nested: IndexShardRestoreFailedException[failed to restore snapshot [snap1]]; nested: NullPointerException[checksum must not be null]; \"\n }\n },\n {\n \"state\" : \"UNASSIGNED\",\n \"primary\" : false,\n \"node\" : null,\n \"relocating_node\" : null,\n \"shard\" : 2,\n \"index\" : \"idx1\",\n \"unassigned_info\" : {\n \"reason\" : \"NEW_INDEX_RESTORED\",\n \"at\" : \"2016-06-02T20:45:10.629Z\",\n \"delayed\" : false,\n \"details\" : \"restore_source[my_repo/snap1]\"\n }\n }\n ],\n \"1\" : [\n {\n \"state\" : \"UNASSIGNED\",\n \"primary\" : false,\n \"node\" : null,\n \"relocating_node\" : null,\n \"shard\" : 1,\n \"index\" : \"idx1\",\n \"unassigned_info\" : {\n \"reason\" : \"NEW_INDEX_RESTORED\",\n \"at\" : \"2016-06-02T20:45:10.629Z\",\n \"delayed\" : false,\n \"details\" : \"restore_source[my_repo/snap1]\"\n }\n },\n {\n \"state\" : \"UNASSIGNED\",\n \"primary\" : true,\n \"node\" : null,\n \"relocating_node\" : null,\n \"shard\" : 1,\n \"index\" : \"idx1\",\n \"restore_source\" : {\n \"repository\" : \"my_repo\",\n \"snapshot\" : \"snap1\",\n \"version\" : \"2.3.2\",\n \"index\" : \"idx1\"\n },\n \"unassigned_info\" : {\n \"reason\" : \"ALLOCATION_FAILED\",\n \"at\" : \"2016-06-02T20:45:10.819Z\",\n \"failed_attempts\" : 5,\n \"delayed\" : false,\n \"details\" : \"failed recovery, failure RecoveryFailedException[[idx1][1]: Recovery failed from null into {Aegis}{TQPQL-DTRaq_HhapThIQSg}{127.0.0.1}{127.0.0.1:9300}]; nested: IndexShardRecoveryException[failed recovery]; nested: IndexShardRestoreFailedException[restore failed]; nested: IndexShardRestoreFailedException[failed to restore snapshot [snap1]]; nested: NullPointerException[checksum must not be null]; \"\n }\n }\n ],\n \"4\" : [\n {\n \"state\" : \"STARTED\",\n \"primary\" : true,\n \"node\" : \"TQPQL-DTRaq_HhapThIQSg\",\n \"relocating_node\" : null,\n \"shard\" : 4,\n \"index\" : \"idx1\",\n \"restore_source\" : {\n \"repository\" : \"my_repo\",\n \"snapshot\" : \"snap1\",\n \"version\" : \"2.3.2\",\n \"index\" : \"idx1\"\n },\n \"allocation_id\" : {\n \"id\" : \"6Mrum9dpRPGkklfb9lixEA\"\n }\n },\n {\n \"state\" : \"UNASSIGNED\",\n \"primary\" : false,\n \"node\" : null,\n \"relocating_node\" : null,\n \"shard\" : 4,\n \"index\" : \"idx1\",\n \"unassigned_info\" : {\n \"reason\" : \"NEW_INDEX_RESTORED\",\n \"at\" : \"2016-06-02T20:45:10.629Z\",\n \"delayed\" : false,\n \"details\" : \"restore_source[my_repo/snap1]\"\n }\n }\n ],\n \"3\" : [\n {\n \"state\" : \"UNASSIGNED\",\n \"primary\" : false,\n \"node\" : null,\n \"relocating_node\" : null,\n \"shard\" : 3,\n \"index\" : \"idx1\",\n \"unassigned_info\" : {\n \"reason\" : \"NEW_INDEX_RESTORED\",\n \"at\" : \"2016-06-02T20:45:10.629Z\",\n 
\"delayed\" : false,\n \"details\" : \"restore_source[my_repo/snap1]\"\n }\n },\n {\n \"state\" : \"UNASSIGNED\",\n \"primary\" : true,\n \"node\" : null,\n \"relocating_node\" : null,\n \"shard\" : 3,\n \"index\" : \"idx1\",\n \"restore_source\" : {\n \"repository\" : \"my_repo\",\n \"snapshot\" : \"snap1\",\n \"version\" : \"2.3.2\",\n \"index\" : \"idx1\"\n },\n \"unassigned_info\" : {\n \"reason\" : \"ALLOCATION_FAILED\",\n \"at\" : \"2016-06-02T20:45:10.815Z\",\n \"failed_attempts\" : 5,\n \"delayed\" : false,\n \"details\" : \"failed recovery, failure RecoveryFailedException[[idx1][3]: Recovery failed from null into {Aegis}{TQPQL-DTRaq_HhapThIQSg}{127.0.0.1}{127.0.0.1:9300}]; nested: IndexShardRecoveryException[failed recovery]; nested: IndexShardRestoreFailedException[restore failed]; nested: IndexShardRestoreFailedException[failed to restore snapshot [snap1]]; nested: NullPointerException[checksum must not be null]; \"\n }\n }\n ],\n \"0\" : [\n {\n \"state\" : \"STARTED\",\n \"primary\" : true,\n \"node\" : \"TQPQL-DTRaq_HhapThIQSg\",\n \"relocating_node\" : null,\n \"shard\" : 0,\n \"index\" : \"idx1\",\n \"restore_source\" : {\n \"repository\" : \"my_repo\",\n \"snapshot\" : \"snap1\",\n \"version\" : \"2.3.2\",\n \"index\" : \"idx1\"\n },\n \"allocation_id\" : {\n \"id\" : \"Yli19wMdQnOu4itVUo9IPg\"\n }\n },\n {\n \"state\" : \"UNASSIGNED\",\n \"primary\" : false,\n \"node\" : null,\n \"relocating_node\" : null,\n \"shard\" : 0,\n \"index\" : \"idx1\",\n \"unassigned_info\" : {\n \"reason\" : \"NEW_INDEX_RESTORED\",\n \"at\" : \"2016-06-02T20:45:10.629Z\",\n \"delayed\" : false,\n \"details\" : \"restore_source[my_repo/snap1]\"\n }\n }\n ]\n }\n }\n```\n\nWhile some primaries are activated, others remain unassigned due to the allocation failure resulting from the missing checksum throwing a NPE.\n", "created_at": "2016-06-02T20:59:08Z" } ], "number": 18707, "title": "Restore of 2.x snapshot throws checksum missing exceptions on 5.0" }
{ "body": "When trying to restore a snapshot of an index created in a previous\nversion of Elasticsearch, it is possible that empty shards in the\nsnapshot have a segments_N file that has an unsupported Lucene version\nand a missing checksum. This leads to issues with restoring the\nsnapshot. This commit handles this special case by avoiding a restore\nof a shard that has no data, since there is nothing to restore anyway.\n\nCloses #18707\n", "number": 18784, "review_comments": [ { "body": "should we just use `Version.parseLenient(in.readString())` it must be non-null right?\n", "created_at": "2016-06-09T15:49:51Z" }, { "body": "this should go away here I think it's impl details of a bwc layer in S/R\n", "created_at": "2016-06-09T15:50:22Z" }, { "body": "can we verify `writtenBy` here too ie. if writtenBy is >= 6.0 we must fail since we are guaranteed to have the checksum here?\n", "created_at": "2016-06-09T15:56:16Z" }, { "body": "make this an IllegalStateException?\n", "created_at": "2016-06-09T15:56:35Z" }, { "body": "why do we need this here? I think this entire `hashAndLengthEqual` can and has to go away\n", "created_at": "2016-06-09T15:57:17Z" }, { "body": "why is this fully qualified?\n", "created_at": "2016-06-09T15:57:42Z" }, { "body": "Yes it has to be non-null, the only difference in using `Version.parseLeniently` being we will have to handle the `ParseException` ourselves. I can catch the exception and wrap it into an `IOException` here\n", "created_at": "2016-06-09T16:07:12Z" }, { "body": "I'll add a method to `StoreFileMetaData` with something to the effect of `isUnknownChecksum`, then this field can be `private`. Its still helpful to have a central place for it b/c other places use it (i.e. check on \"_na_\")\n", "created_at": "2016-06-09T16:09:08Z" }, { "body": "yes, good catch\n", "created_at": "2016-06-09T16:09:49Z" }, { "body": "I see what you were saying now, i'll move this to `BlobStoreIndexShardSnapshot`, esp since I no longer need it for hashAndLengthEqual as its going away\n", "created_at": "2016-06-09T16:27:39Z" }, { "body": "ok just add an assert here to be sure `assert writtenBy != null`\n", "created_at": "2016-06-10T07:43:43Z" }, { "body": "Done\n", "created_at": "2016-06-10T13:55:25Z" } ], "title": "Better handling of an empty shard's segments_N file" }
{ "commits": [ { "message": "Better handling of an empty shard's segments_N file\n\nWhen trying to restore a snapshot of an index created in a previous\nversion of Elasticsearch, it is possible that empty shards in the\nsnapshot have a segments_N file that has an unsupported Lucene version\nand a missing checksum. This leads to issues with restoring the\nsnapshot. This commit handles this special case by avoiding a restore\nof a shard that has no data, since there is nothing to restore anyway.\n\nCloses #18707" }, { "message": "Addressed code review comments" } ], "files": [ { "diff": "@@ -62,13 +62,6 @@ public long length() {\n public String checksum() {\n return checksum;\n }\n-\n- public boolean isSame(StoreFileMetaData md) {\n- if (checksum == null || md.checksum() == null) {\n- return false;\n- }\n- return length == md.length() && checksum.equals(md.checksum());\n- }\n }\n \n public static enum Type {", "filename": "core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java", "status": "modified" }, { "diff": "@@ -23,6 +23,8 @@\n import org.apache.lucene.index.IndexCommit;\n import org.apache.lucene.index.IndexFormatTooNewException;\n import org.apache.lucene.index.IndexFormatTooOldException;\n+import org.apache.lucene.index.IndexWriter;\n+import org.apache.lucene.index.IndexWriterConfig;\n import org.apache.lucene.index.SegmentInfos;\n import org.apache.lucene.store.IOContext;\n import org.apache.lucene.store.IndexInput;\n@@ -49,7 +51,6 @@\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.unit.ByteSizeValue;\n import org.elasticsearch.common.util.iterable.Iterables;\n-import org.elasticsearch.index.IndexService;\n import org.elasticsearch.index.shard.ShardId;\n import org.elasticsearch.index.snapshots.IndexShardRepository;\n import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException;\n@@ -458,7 +459,9 @@ protected Tuple<BlobStoreIndexShardSnapshots, Integer> buildBlobStoreIndexShardS\n }\n if (latest >= 0) {\n try {\n- return new Tuple<>(indexShardSnapshotsFormat.read(blobContainer, Integer.toString(latest)), latest);\n+ final BlobStoreIndexShardSnapshots shardSnapshots =\n+ indexShardSnapshotsFormat.read(blobContainer, Integer.toString(latest));\n+ return new Tuple<>(shardSnapshots, latest);\n } catch (IOException e) {\n logger.warn(\"failed to read index file [{}]\", e, SNAPSHOT_INDEX_PREFIX + latest);\n }\n@@ -503,10 +506,8 @@ private class SnapshotContext extends Context {\n */\n public SnapshotContext(SnapshotId snapshotId, ShardId shardId, IndexShardSnapshotStatus snapshotStatus) {\n super(snapshotId, Version.CURRENT, shardId);\n- IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());\n- store = indexService.getShardOrNull(shardId.id()).store();\n this.snapshotStatus = snapshotStatus;\n-\n+ store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store();\n }\n \n /**\n@@ -788,8 +789,8 @@ private class RestoreContext extends Context {\n */\n public RestoreContext(SnapshotId snapshotId, Version version, ShardId shardId, ShardId snapshotShardId, RecoveryState recoveryState) {\n super(snapshotId, version, shardId, snapshotShardId);\n- store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store();\n this.recoveryState = recoveryState;\n+ store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store();\n }\n \n /**\n@@ -800,6 +801,25 @@ public void restore() throws IOException {\n try {\n 
logger.debug(\"[{}] [{}] restoring to [{}] ...\", snapshotId, repositoryName, shardId);\n BlobStoreIndexShardSnapshot snapshot = loadSnapshot();\n+\n+ if (snapshot.indexFiles().size() == 1\n+ && snapshot.indexFiles().get(0).physicalName().startsWith(\"segments_\")\n+ && snapshot.indexFiles().get(0).hasUnknownChecksum()) {\n+ // If the shard has no documents, it will only contain a single segments_N file for the\n+ // shard's snapshot. If we are restoring a snapshot created by a previous supported version,\n+ // it is still possible that in that version, an empty shard has a segments_N file with an unsupported\n+ // version (and no checksum), because we don't know the Lucene version to assign segments_N until we\n+ // have written some data. Since the segments_N for an empty shard could have an incompatible Lucene\n+ // version number and no checksum, even though the index itself is perfectly fine to restore, this\n+ // empty shard would cause exceptions to be thrown. Since there is no data to restore from an empty\n+ // shard anyway, we just create the empty shard here and then exit.\n+ IndexWriter writer = new IndexWriter(store.directory(), new IndexWriterConfig(null)\n+ .setOpenMode(IndexWriterConfig.OpenMode.CREATE)\n+ .setCommitOnClose(true));\n+ writer.close();\n+ return;\n+ }\n+\n SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles());\n final Store.MetadataSnapshot recoveryTargetMetadata;\n try {", "filename": "core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java", "status": "modified" }, { "diff": "@@ -22,7 +22,6 @@\n import org.apache.lucene.util.BytesRef;\n import org.apache.lucene.util.Version;\n import org.elasticsearch.ElasticsearchParseException;\n-import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.ParseField;\n import org.elasticsearch.common.ParseFieldMatcher;\n import org.elasticsearch.common.Strings;\n@@ -50,6 +49,8 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil\n * Information about snapshotted file\n */\n public static class FileInfo {\n+ private static final String UNKNOWN_CHECKSUM = \"_na_\";\n+\n private final String name;\n private final ByteSizeValue partSize;\n private final long partBytes;\n@@ -207,27 +208,43 @@ public boolean isSame(StoreFileMetaData md) {\n * @return true if file in a store this this file have the same checksum and length\n */\n public boolean isSame(FileInfo fileInfo) {\n- if (numberOfParts != fileInfo.numberOfParts) return false;\n- if (partBytes != fileInfo.partBytes) return false;\n- if (!name.equals(fileInfo.name)) return false;\n+ if (numberOfParts != fileInfo.numberOfParts) {\n+ return false;\n+ }\n+ if (partBytes != fileInfo.partBytes) {\n+ return false;\n+ }\n+ if (!name.equals(fileInfo.name)) {\n+ return false;\n+ }\n if (partSize != null) {\n- if (!partSize.equals(fileInfo.partSize)) return false;\n+ if (!partSize.equals(fileInfo.partSize)) {\n+ return false;\n+ }\n } else {\n- if (fileInfo.partSize != null) return false;\n+ if (fileInfo.partSize != null) {\n+ return false;\n+ }\n }\n return metadata.isSame(fileInfo.metadata);\n }\n \n- static final class Fields {\n- static final String NAME = \"name\";\n- static final String PHYSICAL_NAME = \"physical_name\";\n- static final String LENGTH = \"length\";\n- static final String CHECKSUM = \"checksum\";\n- static final String PART_SIZE = \"part_size\";\n- static final String WRITTEN_BY = \"written_by\";\n- static final String META_HASH = 
\"meta_hash\";\n+ /**\n+ * Checks if the checksum for the file is unknown. This only is possible on an empty shard's\n+ * segments_N file which was created in older Lucene versions.\n+ */\n+ public boolean hasUnknownChecksum() {\n+ return metadata.checksum().equals(UNKNOWN_CHECKSUM);\n }\n \n+ static final String NAME = \"name\";\n+ static final String PHYSICAL_NAME = \"physical_name\";\n+ static final String LENGTH = \"length\";\n+ static final String CHECKSUM = \"checksum\";\n+ static final String PART_SIZE = \"part_size\";\n+ static final String WRITTEN_BY = \"written_by\";\n+ static final String META_HASH = \"meta_hash\";\n+\n /**\n * Serializes file info into JSON\n *\n@@ -237,22 +254,22 @@ static final class Fields {\n */\n public static void toXContent(FileInfo file, XContentBuilder builder, ToXContent.Params params) throws IOException {\n builder.startObject();\n- builder.field(Fields.NAME, file.name);\n- builder.field(Fields.PHYSICAL_NAME, file.metadata.name());\n- builder.field(Fields.LENGTH, file.metadata.length());\n- if (file.metadata.checksum() != null) {\n- builder.field(Fields.CHECKSUM, file.metadata.checksum());\n+ builder.field(NAME, file.name);\n+ builder.field(PHYSICAL_NAME, file.metadata.name());\n+ builder.field(LENGTH, file.metadata.length());\n+ if (file.metadata.checksum().equals(UNKNOWN_CHECKSUM) == false) {\n+ builder.field(CHECKSUM, file.metadata.checksum());\n }\n if (file.partSize != null) {\n- builder.field(Fields.PART_SIZE, file.partSize.bytes());\n+ builder.field(PART_SIZE, file.partSize.bytes());\n }\n \n if (file.metadata.writtenBy() != null) {\n- builder.field(Fields.WRITTEN_BY, file.metadata.writtenBy());\n+ builder.field(WRITTEN_BY, file.metadata.writtenBy());\n }\n \n if (file.metadata.hash() != null && file.metadata().hash().length > 0) {\n- builder.field(Fields.META_HASH, file.metadata.hash());\n+ builder.field(META_HASH, file.metadata.hash());\n }\n builder.endObject();\n }\n@@ -271,26 +288,28 @@ public static FileInfo fromXContent(XContentParser parser) throws IOException {\n String checksum = null;\n ByteSizeValue partSize = null;\n Version writtenBy = null;\n+ String writtenByStr = null;\n BytesRef metaHash = new BytesRef();\n if (token == XContentParser.Token.START_OBJECT) {\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {\n String currentFieldName = parser.currentName();\n token = parser.nextToken();\n if (token.isValue()) {\n- if (\"name\".equals(currentFieldName)) {\n+ if (NAME.equals(currentFieldName)) {\n name = parser.text();\n- } else if (\"physical_name\".equals(currentFieldName)) {\n+ } else if (PHYSICAL_NAME.equals(currentFieldName)) {\n physicalName = parser.text();\n- } else if (\"length\".equals(currentFieldName)) {\n+ } else if (LENGTH.equals(currentFieldName)) {\n length = parser.longValue();\n- } else if (\"checksum\".equals(currentFieldName)) {\n+ } else if (CHECKSUM.equals(currentFieldName)) {\n checksum = parser.text();\n- } else if (\"part_size\".equals(currentFieldName)) {\n+ } else if (PART_SIZE.equals(currentFieldName)) {\n partSize = new ByteSizeValue(parser.longValue());\n- } else if (\"written_by\".equals(currentFieldName)) {\n- writtenBy = Lucene.parseVersionLenient(parser.text(), null);\n- } else if (\"meta_hash\".equals(currentFieldName)) {\n+ } else if (WRITTEN_BY.equals(currentFieldName)) {\n+ writtenByStr = parser.text();\n+ writtenBy = Lucene.parseVersionLenient(writtenByStr, null);\n+ } else if (META_HASH.equals(currentFieldName)) {\n 
metaHash.bytes = parser.binaryValue();\n metaHash.offset = 0;\n metaHash.length = metaHash.bytes.length;\n@@ -305,17 +324,37 @@ public static FileInfo fromXContent(XContentParser parser) throws IOException {\n }\n }\n }\n+\n // Verify that file information is complete\n if (name == null || Strings.validFileName(name) == false) {\n throw new ElasticsearchParseException(\"missing or invalid file name [\" + name + \"]\");\n } else if (physicalName == null || Strings.validFileName(physicalName) == false) {\n throw new ElasticsearchParseException(\"missing or invalid physical file name [\" + physicalName + \"]\");\n } else if (length < 0) {\n throw new ElasticsearchParseException(\"missing or invalid file length\");\n+ } else if (writtenBy == null) {\n+ throw new ElasticsearchParseException(\"missing or invalid written_by [\" + writtenByStr + \"]\");\n+ } else if (checksum == null) {\n+ if (physicalName.startsWith(\"segments_\")\n+ && writtenBy.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION) == false) {\n+ // its possible the checksum is null for segments_N files that belong to a shard with no data,\n+ // so we will assign it _na_ for now and try to get the checksum from the file itself later\n+ checksum = UNKNOWN_CHECKSUM;\n+ } else {\n+ throw new ElasticsearchParseException(\"missing checksum for name [\" + name + \"]\");\n+ }\n }\n return new FileInfo(name, new StoreFileMetaData(physicalName, length, checksum, writtenBy, metaHash), partSize);\n }\n \n+ @Override\n+ public String toString() {\n+ return \"[name: \" + name +\n+ \", numberOfParts: \" + numberOfParts +\n+ \", partSize: \" + partSize +\n+ \", partBytes: \" + partBytes +\n+ \", metadata: \" + metadata + \"]\";\n+ }\n }\n \n private final String snapshot;\n@@ -424,26 +463,21 @@ public long totalSize() {\n return totalSize;\n }\n \n- static final class Fields {\n- static final String NAME = \"name\";\n- static final String INDEX_VERSION = \"index_version\";\n- static final String START_TIME = \"start_time\";\n- static final String TIME = \"time\";\n- static final String NUMBER_OF_FILES = \"number_of_files\";\n- static final String TOTAL_SIZE = \"total_size\";\n- static final String FILES = \"files\";\n- }\n-\n- static final class ParseFields {\n- static final ParseField NAME = new ParseField(\"name\");\n- static final ParseField INDEX_VERSION = new ParseField(\"index_version\", \"index-version\");\n- static final ParseField START_TIME = new ParseField(\"start_time\");\n- static final ParseField TIME = new ParseField(\"time\");\n- static final ParseField NUMBER_OF_FILES = new ParseField(\"number_of_files\");\n- static final ParseField TOTAL_SIZE = new ParseField(\"total_size\");\n- static final ParseField FILES = new ParseField(\"files\");\n- }\n-\n+ private static final String NAME = \"name\";\n+ private static final String INDEX_VERSION = \"index_version\";\n+ private static final String START_TIME = \"start_time\";\n+ private static final String TIME = \"time\";\n+ private static final String NUMBER_OF_FILES = \"number_of_files\";\n+ private static final String TOTAL_SIZE = \"total_size\";\n+ private static final String FILES = \"files\";\n+\n+ private static final ParseField PARSE_NAME = new ParseField(\"name\");\n+ private static final ParseField PARSE_INDEX_VERSION = new ParseField(\"index_version\", \"index-version\");\n+ private static final ParseField PARSE_START_TIME = new ParseField(\"start_time\");\n+ private static final ParseField PARSE_TIME = new ParseField(\"time\");\n+ private static final ParseField 
PARSE_NUMBER_OF_FILES = new ParseField(\"number_of_files\");\n+ private static final ParseField PARSE_TOTAL_SIZE = new ParseField(\"total_size\");\n+ private static final ParseField PARSE_FILES = new ParseField(\"files\");\n \n /**\n * Serializes shard snapshot metadata info into JSON\n@@ -453,13 +487,13 @@ static final class ParseFields {\n */\n @Override\n public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {\n- builder.field(Fields.NAME, snapshot);\n- builder.field(Fields.INDEX_VERSION, indexVersion);\n- builder.field(Fields.START_TIME, startTime);\n- builder.field(Fields.TIME, time);\n- builder.field(Fields.NUMBER_OF_FILES, numberOfFiles);\n- builder.field(Fields.TOTAL_SIZE, totalSize);\n- builder.startArray(Fields.FILES);\n+ builder.field(NAME, snapshot);\n+ builder.field(INDEX_VERSION, indexVersion);\n+ builder.field(START_TIME, startTime);\n+ builder.field(TIME, time);\n+ builder.field(NUMBER_OF_FILES, numberOfFiles);\n+ builder.field(TOTAL_SIZE, totalSize);\n+ builder.startArray(FILES);\n for (FileInfo fileInfo : indexFiles) {\n FileInfo.toXContent(fileInfo, builder, params);\n }\n@@ -493,24 +527,24 @@ public BlobStoreIndexShardSnapshot fromXContent(XContentParser parser, ParseFiel\n String currentFieldName = parser.currentName();\n token = parser.nextToken();\n if (token.isValue()) {\n- if (parseFieldMatcher.match(currentFieldName, ParseFields.NAME)) {\n+ if (parseFieldMatcher.match(currentFieldName, PARSE_NAME)) {\n snapshot = parser.text();\n- } else if (parseFieldMatcher.match(currentFieldName, ParseFields.INDEX_VERSION)) {\n+ } else if (parseFieldMatcher.match(currentFieldName, PARSE_INDEX_VERSION)) {\n // The index-version is needed for backward compatibility with v 1.0\n indexVersion = parser.longValue();\n- } else if (parseFieldMatcher.match(currentFieldName, ParseFields.START_TIME)) {\n+ } else if (parseFieldMatcher.match(currentFieldName, PARSE_START_TIME)) {\n startTime = parser.longValue();\n- } else if (parseFieldMatcher.match(currentFieldName, ParseFields.TIME)) {\n+ } else if (parseFieldMatcher.match(currentFieldName, PARSE_TIME)) {\n time = parser.longValue();\n- } else if (parseFieldMatcher.match(currentFieldName, ParseFields.NUMBER_OF_FILES)) {\n+ } else if (parseFieldMatcher.match(currentFieldName, PARSE_NUMBER_OF_FILES)) {\n numberOfFiles = parser.intValue();\n- } else if (parseFieldMatcher.match(currentFieldName, ParseFields.TOTAL_SIZE)) {\n+ } else if (parseFieldMatcher.match(currentFieldName, PARSE_TOTAL_SIZE)) {\n totalSize = parser.longValue();\n } else {\n throw new ElasticsearchParseException(\"unknown parameter [{}]\", currentFieldName);\n }\n } else if (token == XContentParser.Token.START_ARRAY) {\n- if (parseFieldMatcher.match(currentFieldName, ParseFields.FILES)) {\n+ if (parseFieldMatcher.match(currentFieldName, PARSE_FILES)) {\n while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) {\n indexFiles.add(FileInfo.fromXContent(parser));\n }\n@@ -526,6 +560,7 @@ public BlobStoreIndexShardSnapshot fromXContent(XContentParser parser, ParseFiel\n }\n }\n return new BlobStoreIndexShardSnapshot(snapshot, indexVersion, Collections.unmodifiableList(indexFiles),\n- startTime, time, numberOfFiles, totalSize);\n+ startTime, time, numberOfFiles, totalSize);\n }\n+\n }", "filename": "core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java", "status": "modified" }, { "diff": "@@ -41,7 +41,6 @@\n import org.apache.lucene.store.Lock;\n import 
org.apache.lucene.store.SimpleFSDirectory;\n import org.apache.lucene.util.ArrayUtil;\n-import org.apache.lucene.util.BytesRef;\n import org.apache.lucene.util.BytesRefBuilder;\n import org.apache.lucene.util.IOUtils;\n import org.apache.lucene.util.Version;\n@@ -444,11 +443,9 @@ public static void tryOpenIndex(Path indexLocation, ShardId shardId, ESLogger lo\n }\n \n /**\n- * The returned IndexOutput might validate the files checksum if the file has been written with a newer lucene version\n- * and the metadata holds the necessary information to detect that it was been written by Lucene 4.8 or newer. If it has only\n- * a legacy checksum, returned IndexOutput will not verify the checksum.\n+ * The returned IndexOutput validates the files checksum.\n * <p>\n- * Note: Checksums are calculated nevertheless since lucene does it by default sicne version 4.8.0. This method only adds the\n+ * Note: Checksums are calculated by default since version 4.8.0. This method only adds the\n * verification against the checksum in the given metadata and does not add any significant overhead.\n */\n public IndexOutput createVerifyingOutput(String fileName, final StoreFileMetaData metadata, final IOContext context) throws IOException {\n@@ -652,17 +649,7 @@ final void verifyAfterCleanup(MetadataSnapshot sourceMetaData, MetadataSnapshot\n // different in the diff. That's why we have to double check here again if the rest of it matches.\n \n // all is fine this file is just part of a commit or a segment that is different\n- final boolean same = local.isSame(remote);\n-\n- // this check ensures that the two files are consistent ie. if we don't have checksums only the rest needs to match we are just\n- // verifying that we are consistent on both ends source and target\n- final boolean hashAndLengthEqual = (\n- local.checksum() == null\n- && remote.checksum() == null\n- && local.hash().equals(remote.hash())\n- && local.length() == remote.length());\n- final boolean consistent = hashAndLengthEqual || same;\n- if (consistent == false) {\n+ if (local.isSame(remote) == false) {\n logger.debug(\"Files are different on the recovery target: {} \", recoveryDiff);\n throw new IllegalStateException(\"local version: \" + local + \" is different from remote version after recovery: \" + remote, null);\n }\n@@ -898,18 +885,6 @@ private static void checksumFromLuceneFile(Directory directory, String file, Map\n }\n }\n \n- /**\n- * Computes a strong hash value for small files. Note that this method should only be used for files &lt; 1MB\n- */\n- public static BytesRef hashFile(Directory directory, String file) throws IOException {\n- final BytesRefBuilder fileHash = new BytesRefBuilder();\n- try (final IndexInput in = directory.openInput(file, IOContext.READONCE)) {\n- hashFile(fileHash, new InputStreamIndexInput(in, in.length()), in.length());\n- }\n- return fileHash.get();\n- }\n-\n-\n /**\n * Computes a strong hash value for small files. 
Note that this method should only be used for files &lt; 1MB\n */", "filename": "core/src/main/java/org/elasticsearch/index/store/Store.java", "status": "modified" }, { "diff": "@@ -21,10 +21,8 @@\n \n import org.apache.lucene.util.BytesRef;\n import org.apache.lucene.util.Version;\n-import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n-import org.elasticsearch.common.io.stream.Streamable;\n import org.elasticsearch.common.io.stream.Writeable;\n import org.elasticsearch.common.lucene.Lucene;\n \n@@ -58,14 +56,15 @@ public StoreFileMetaData(String name, long length, String checksum, Version writ\n }\n \n public StoreFileMetaData(String name, long length, String checksum, Version writtenBy, BytesRef hash) {\n- assert writtenBy != null && writtenBy.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION) : \"index version less that \"\n- + FIRST_LUCENE_CHECKSUM_VERSION + \" are not supported but got: \" + writtenBy;\n- Objects.requireNonNull(writtenBy, \"writtenBy must not be null\");\n- Objects.requireNonNull(checksum, \"checksum must not be null\");\n- this.name = name;\n+ // its possible here to have a _na_ checksum or an unsupported writtenBy version, if the\n+ // file is a segments_N file, but that is fine in the case of a segments_N file because\n+ // we handle that case upstream\n+ assert name.startsWith(\"segments_\") || (writtenBy != null && writtenBy.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION)) :\n+ \"index version less that \" + FIRST_LUCENE_CHECKSUM_VERSION + \" are not supported but got: \" + writtenBy;\n+ this.name = Objects.requireNonNull(name, \"name must not be null\");\n this.length = length;\n- this.checksum = checksum;\n- this.writtenBy = writtenBy;\n+ this.checksum = Objects.requireNonNull(checksum, \"checksum must not be null\");\n+ this.writtenBy = Objects.requireNonNull(writtenBy, \"writtenBy must not be null\");\n this.hash = hash == null ? 
new BytesRef() : hash;\n }\n ", "filename": "core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java", "status": "modified" }, { "diff": "@@ -20,7 +20,6 @@\n package org.elasticsearch.indices.recovery;\n \n import org.apache.lucene.util.Version;\n-import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n@@ -76,7 +75,6 @@ public long position() {\n return position;\n }\n \n- @Nullable\n public String checksum() {\n return metaData.checksum();\n }\n@@ -105,11 +103,9 @@ public void readFrom(StreamInput in) throws IOException {\n String name = in.readString();\n position = in.readVLong();\n long length = in.readVLong();\n- String checksum = in.readOptionalString();\n+ String checksum = in.readString();\n content = in.readBytesReference();\n- Version writtenBy = null;\n- String versionString = in.readOptionalString();\n- writtenBy = Lucene.parseVersionLenient(versionString, null);\n+ Version writtenBy = Lucene.parseVersionLenient(in.readString(), null);\n metaData = new StoreFileMetaData(name, length, checksum, writtenBy);\n lastChunk = in.readBoolean();\n totalTranslogOps = in.readVInt();\n@@ -124,9 +120,9 @@ public void writeTo(StreamOutput out) throws IOException {\n out.writeString(metaData.name());\n out.writeVLong(position);\n out.writeVLong(metaData.length());\n- out.writeOptionalString(metaData.checksum());\n+ out.writeString(metaData.checksum());\n out.writeBytesReference(content);\n- out.writeOptionalString(metaData.writtenBy() == null ? null : metaData.writtenBy().toString());\n+ out.writeString(metaData.writtenBy().toString());\n out.writeBoolean(lastChunk);\n out.writeVInt(totalTranslogOps);\n out.writeLong(sourceThrottleTimeInNanos);", "filename": "core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java", "status": "modified" }, { "diff": "@@ -115,4 +115,5 @@ protected T read(BytesReference bytes) throws IOException {\n \n }\n }\n+\n }", "filename": "core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java", "status": "modified" }, { "diff": "@@ -1,4 +1,3 @@\n-/*\n /*\n * Licensed to Elasticsearch under one or more contributor\n * license agreements. 
See the NOTICE file distributed with\n@@ -46,6 +45,7 @@\n import java.nio.file.Files;\n import java.nio.file.Path;\n import java.util.ArrayList;\n+import java.util.Arrays;\n import java.util.List;\n import java.util.Locale;\n import java.util.SortedSet;\n@@ -127,6 +127,44 @@ public void testRestoreUnsupportedSnapshots() throws Exception {\n }\n }\n \n+ public void testRestoreSnapshotWithMissingChecksum() throws Exception {\n+ final String repo = \"test_repo\";\n+ final String snapshot = \"test_1\";\n+ final String indexName = \"index-2.3.4\";\n+ final String repoFileId = \"missing-checksum-repo-2.3.4\";\n+ Path repoFile = getBwcIndicesPath().resolve(repoFileId + \".zip\");\n+ URI repoFileUri = repoFile.toUri();\n+ URI repoJarUri = new URI(\"jar:\" + repoFileUri.toString() + \"!/repo/\");\n+ logger.info(\"--> creating repository [{}] for repo file [{}]\", repo, repoFileId);\n+ assertAcked(client().admin().cluster().preparePutRepository(repo)\n+ .setType(\"url\")\n+ .setSettings(Settings.builder().put(\"url\", repoJarUri.toString())));\n+\n+ logger.info(\"--> get snapshot and check its indices\");\n+ GetSnapshotsResponse getSnapshotsResponse = client().admin().cluster().prepareGetSnapshots(repo).setSnapshots(snapshot).get();\n+ assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1));\n+ SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots().get(0);\n+ assertThat(snapshotInfo.indices(), equalTo(Arrays.asList(indexName)));\n+\n+ logger.info(\"--> restoring snapshot\");\n+ RestoreSnapshotResponse response = client().admin().cluster().prepareRestoreSnapshot(repo, snapshot).setRestoreGlobalState(true).setWaitForCompletion(true).get();\n+ assertThat(response.status(), equalTo(RestStatus.OK));\n+ RestoreInfo restoreInfo = response.getRestoreInfo();\n+ assertThat(restoreInfo.successfulShards(), greaterThan(0));\n+ assertThat(restoreInfo.successfulShards(), equalTo(restoreInfo.totalShards()));\n+ assertThat(restoreInfo.failedShards(), equalTo(0));\n+ String index = restoreInfo.indices().get(0);\n+ assertThat(index, equalTo(indexName));\n+\n+ logger.info(\"--> check search\");\n+ SearchResponse searchResponse = client().prepareSearch(index).get();\n+ assertThat(searchResponse.getHits().totalHits(), greaterThan(0L));\n+\n+ logger.info(\"--> cleanup\");\n+ cluster().wipeIndices(restoreInfo.indices().toArray(new String[restoreInfo.indices().size()]));\n+ cluster().wipeTemplates();\n+ }\n+\n private List<String> repoVersions() throws Exception {\n return listRepoVersions(\"repo\");\n }", "filename": "core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java", "status": "modified" }, { "diff": "@@ -27,7 +27,7 @@\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.common.xcontent.XContentParser;\n import org.elasticsearch.common.xcontent.XContentType;\n-import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.Fields;\n+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo;\n import org.elasticsearch.index.store.StoreFileMetaData;\n import org.elasticsearch.test.ESTestCase;\n \n@@ -105,11 +105,11 @@ public void testInvalidFieldsInFromXContent() throws IOException {\n \n XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);\n builder.startObject();\n- builder.field(Fields.NAME, name);\n- builder.field(Fields.PHYSICAL_NAME, physicalName);\n- builder.field(Fields.LENGTH, length);\n- builder.field(Fields.WRITTEN_BY, Version.LATEST.toString());\n- 
builder.field(Fields.CHECKSUM, \"666\");\n+ builder.field(FileInfo.NAME, name);\n+ builder.field(FileInfo.PHYSICAL_NAME, physicalName);\n+ builder.field(FileInfo.LENGTH, length);\n+ builder.field(FileInfo.WRITTEN_BY, Version.LATEST.toString());\n+ builder.field(FileInfo.CHECKSUM, \"666\");\n builder.endObject();\n byte[] xContent = builder.bytes().toBytes();\n ", "filename": "core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java", "status": "modified" }, { "diff": "", "filename": "core/src/test/resources/indices/bwc/missing-checksum-repo-2.3.4.zip", "status": "added" } ] }
{ "body": "Using 2.2.0, I am unable to delete by query for data that has been indexed using `external_gte` [version type](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types). Here's the error that I'm receiving:\n\n```\n[ec2-user@es1-dev ~]$ curl -XDELETE 'http://es1:9200/testindex/_query?q=repo:testing'\n{\"error\":{\"root_cause\":[{\"type\":\"action_request_validation_exception\",\"reason\":\"Validation Failed: 1: illegal version value [0] for version type [INTERNAL];2: illegal version value [0] for version type [INTERNAL];3: illegal version value [0] for version type [INTERNAL];4: illegal version value [0] for version type [INTERNAL];5: illegal version value [0] for version type [INTERNAL];6: illegal version value [0] for version type [INTERNAL];7: illegal version value [0] for version type [INTERNAL];8: illegal version value [0] for version type [INTERNAL];9: illegal version value [0] for version type [INTERNAL];10: illegal version value [0] for version type [INTERNAL];\"}],\"type\":\"action_request_validation_exception\",\"reason\":\"Validation Failed: 1: illegal version value [0] for version type [INTERNAL];2: illegal version value [0] for version type [INTERNAL];3: illegal version value [0] for version type [INTERNAL];4: illegal version value [0] for version type [INTERNAL];5: illegal version value [0] for version type [INTERNAL];6: illegal version value [0] for version type [INTERNAL];7: illegal version value [0] for version type [INTERNAL];8: illegal version value [0] for version type [INTERNAL];9: illegal version value [0] for version type [INTERNAL];10: illegal version value [0] for version type [INTERNAL];\"},\"status\":400}\n```\n\nThe delete by query succeeds for an index that doesn't use `external_gte`.\n\nthanks!\n", "comments": [ { "body": "@bleskes what do you think?\n", "created_at": "2016-02-14T00:13:23Z" }, { "body": "This indeed an unfortunate case where internal and external versioning do not mix well. Internal version mean that ES is the source of truth for changes - it is incremented with every change in ES and starts with 1. External versioning assumes that some other system tracks document changes (including deletes). Originally 0 was an invalid value for external versioning but it wasn't enforced in code. When we fixed the latter people complained and we have changed semantics to allow 0 as a valid external value (see https://github.com/elastic/elasticsearch/issues/5662). Now you can insert a value that's valid as an external version but is illegal for internal.\n\nThe delete by query plugin uses internal versioning to make sure the documents it deletes didn't change during it's operations. However, since the documents were indexed using the external versioning, their version is 0 which is illegal. \n\nCan you tell us a bit more about your setup? Why are you using the delete by query plugin where you have some external source of truth? I would presume you would delete documents there first and have those propagated to ES as deletes with an external version?\n", "created_at": "2016-02-15T14:17:36Z" }, { "body": "We receive our data from a third-party that supplies versions, starting with 0. For one of our indexes, we only care about the most recent version of a given resource, but need to be able to support reloading old data (mapping changes, etc). In order to ensure we're only keeping the latest (regardless of order received) we've gone with indexing using `external_gte`. 
Our process simply ignores the VersionConflictException that gets returned when attempting to add an older version. It has worked rather well for us.\n\nPeriodically, we'll need to delete data, for a variety of reasons. These are one-off deletes, usually related to expiring license agreements and such, and are separate from any versioning scheme. Historically we've just manually done a delete by query to handle these cases, which has served us well until recently.\n", "created_at": "2016-02-15T15:27:01Z" }, { "body": "I'm using internal indexing and hitting this on index...\n\n illegal version value [0] for version type [INTERNAL];\n", "created_at": "2016-09-29T19:37:17Z" }, { "body": "@niemyjski as we discussed in another issue, your issue is different than this one.\n\n@natelapp thanks for the update. The problem is that currently doesn't align with the main use case for external versioning, where some external source owns all changes to the documents, including deletes. I haven't come up with a clean way of allowing you to do what you need plus making other use cases work without surprises. As a workaround for now, I think the easiest for you is to always +1 the version you get from your data source (to allow a delete by query operation).\n", "created_at": "2016-10-03T11:35:55Z" }, { "body": "@natelapp thanks for reporting this issue. The issue has been fixed in the upcoming 6.7 and 7.0 versions and I will therefore close this issue.", "created_at": "2019-03-26T13:44:22Z" } ], "number": 16654, "title": "ES 2.2.0 delete by query plugin fails for data with external versioning" }
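The failure mode described in this issue can be sketched with the Java client API of that era. This is a minimal, hypothetical reproduction (the `Client` wiring, index/type names, and field values are assumptions, not taken from the reporter's setup): documents stored with an external version of 0 are fine on their own, but any follow-up operation that replays that stored version with `internal` semantics, as the delete-by-query plugin does, fails request validation. The last method shows the "+1 the external version" workaround suggested in the comments.

```
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.VersionType;

// Hypothetical reproduction helper; names and the Client wiring are assumptions.
public class ExternalVersionZeroRepro {

    public static void reproduce(Client client) {
        // The external feed starts versions at 0, so the document is stored with _version = 0.
        client.prepareIndex("testindex", "doc", "1")
              .setSource("repo", "testing")
              .setVersion(0)
              .setVersionType(VersionType.EXTERNAL_GTE)
              .get();

        try {
            // Anything that replays that stored version with INTERNAL semantics (as the
            // delete-by-query plugin does) fails request validation before any delete happens.
            client.prepareDelete("testindex", "doc", "1")
                  .setVersion(0) // 0 is not a legal internal version
                  .setVersionType(VersionType.INTERNAL)
                  .get();
        } catch (ActionRequestValidationException e) {
            // "illegal version value [0] for version type [INTERNAL]"
        }
    }

    public static void workaround(Client client, long feedVersion) {
        // Workaround from the discussion: shift the external versions so they start at 1,
        // keeping the external ordering intact while avoiding the illegal value 0.
        client.prepareIndex("testindex", "doc", "2")
              .setSource("repo", "testing")
              .setVersion(feedVersion + 1)
              .setVersionType(VersionType.EXTERNAL_GTE)
              .get();
    }
}
```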
{ "body": "This commit adds a `version_type` option to the request body for both Update-by-query and Delete-by-query. The option can take the value `internal` (default) and `force`. This last one can help to update or delete documents that have been created with an external version number equal to zero.\n\ncloses #16654\n", "number": 18750, "review_comments": [], "title": "Expose version type to Update & Delete by query" }
{ "commits": [ { "message": "Expose version type to Update & Delete by query\n\nThis commit adds a `version_type` option to the request body for both Update-by-query and Delete-by-query. The option can take the value `internal` (default) and `force`. This last one can help to update or delete documents that have been created with an external version number equal to zero.\n\ncloses #16654" } ], "files": [ { "diff": "@@ -69,6 +69,30 @@ these documents. In case a search or bulk request got rejected, `_delete_by_quer\n If you'd like to count version conflicts rather than cause them to abort then\n set `conflicts=proceed` on the url or `\"conflicts\": \"proceed\"` in the request body.\n \n+By default `_delete_by_query` uses `internal` versioning and expects version\n+numbers to be greater or equal to 1. If the version of documents are externally managed\n+ and are equal to zero, executing `_delete_by_query` will report version conflicts.\n+In this specific case, you can change the version type from the default `internal`\n+ to the `force` value. The documents will then be deleted regardless of the version\n+ of the stored documents. Note that forcing the version should be used with care.\n+\n+[source,js]\n+--------------------------------------------------\n+POST twitter/_delete_by_query\n+{\n+ \"query\": {\n+ \"term\": {\n+ \"user\": \"kimchy\"\n+ }\n+ },\n+ \"version_type\": \"force\" <1>\n+}\n+--------------------------------------------------\n+// TEST[setup:twitter]\n+\n+<1> Indicates to use a forced version type. Possible values are either `internal`\n+or `force`.\n+\n Back to the API format, you can limit `_delete_by_query` to a single type. This\n will only delete `tweet` documents from the `twitter` index:\n ", "filename": "docs/reference/docs/delete-by-query.asciidoc", "status": "modified" }, { "diff": "@@ -46,6 +46,30 @@ conflict if the document changes between the time when the snapshot was taken\n and when the index request is processed. When the versions match the document\n is updated and the version number is incremented.\n \n+By default `_update_by_query` uses `internal` versioning and expects version\n+numbers to be greater or equal to 1. If the version of documents are externally managed\n+ and are equal to zero, executing `_update_by_query` will report version conflicts.\n+In this specific case, you can change the version type from the default `internal`\n+ to the `force` value. The documents will then be updated regardless of the version\n+ of the stored documents. Note that forcing the version should be used with care.\n+\n+[source,js]\n+--------------------------------------------------\n+POST twitter/_update_by_query\n+{\n+ \"query\": {\n+ \"term\": {\n+ \"user\": \"kimchy\"\n+ }\n+ },\n+ \"version_type\": \"force\" <1>\n+}\n+--------------------------------------------------\n+// TEST[setup:twitter]\n+\n+<1> Indicates to use a forced version type. Possible values are either `internal`\n+or `force`.\n+\n All update and query failures cause the `_update_by_query` to abort and are\n returned in the `failures` of the response. The updates that have been\n performed still stick. 
In other words, the process is not rolled back, only", "filename": "docs/reference/docs/update-by-query.asciidoc", "status": "modified" }, { "diff": "@@ -23,6 +23,7 @@\n import org.elasticsearch.action.IndicesRequest;\n import org.elasticsearch.action.search.SearchRequest;\n import org.elasticsearch.action.support.IndicesOptions;\n+import org.elasticsearch.index.VersionType;\n \n import static org.elasticsearch.action.ValidateActions.addValidationError;\n \n@@ -45,6 +46,11 @@\n */\n public class DeleteByQueryRequest extends AbstractBulkByScrollRequest<DeleteByQueryRequest> implements IndicesRequest {\n \n+ /**\n+ * Versioning type to set on index requests made by this action.\n+ */\n+ private VersionType versionType = VersionType.INTERNAL;\n+\n public DeleteByQueryRequest() {\n }\n \n@@ -54,6 +60,14 @@ public DeleteByQueryRequest(SearchRequest search) {\n search.source().fetchSource(false);\n }\n \n+ public VersionType getVersionType() {\n+ return versionType;\n+ }\n+\n+ public void setVersionType(VersionType versionType) {\n+ this.versionType = versionType;\n+ }\n+\n @Override\n protected DeleteByQueryRequest self() {\n return this;\n@@ -68,6 +82,11 @@ public ActionRequestValidationException validate() {\n if (getSearchRequest() == null || getSearchRequest().source() == null) {\n e = addValidationError(\"source is missing\", e);\n }\n+ if (versionType == null) {\n+ e = addValidationError(\"version type is missing\", e);\n+ } else if (versionType != VersionType.INTERNAL && versionType != VersionType.FORCE) {\n+ e = addValidationError(\"version type [\" + versionType.toString() + \"] is not supported\", e);\n+ }\n return e;\n }\n ", "filename": "modules/reindex/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java", "status": "modified" }, { "diff": "@@ -25,6 +25,7 @@\n import org.elasticsearch.cluster.service.ClusterService;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.index.VersionType;\n import org.elasticsearch.indices.query.IndicesQueriesRegistry;\n import org.elasticsearch.rest.RestChannel;\n import org.elasticsearch.rest.RestController;\n@@ -69,6 +70,7 @@ protected DeleteByQueryRequest buildRequest(RestRequest request) throws IOExcept\n \n Map<String, Consumer<Object>> consumers = new HashMap<>();\n consumers.put(\"conflicts\", o -> internal.setConflicts((String) o));\n+ consumers.put(\"version_type\", o -> internal.setVersionType(VersionType.fromString((String) o)));\n \n parseInternalRequest(internal, request, consumers);\n ", "filename": "modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java", "status": "modified" }, { "diff": "@@ -24,6 +24,7 @@\n import org.elasticsearch.cluster.service.ClusterService;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.index.VersionType;\n import org.elasticsearch.indices.query.IndicesQueriesRegistry;\n import org.elasticsearch.rest.RestChannel;\n import org.elasticsearch.rest.RestController;\n@@ -68,6 +69,7 @@ protected UpdateByQueryRequest buildRequest(RestRequest request) throws IOExcept\n Map<String, Consumer<Object>> consumers = new HashMap<>();\n consumers.put(\"conflicts\", o -> internal.setConflicts((String) o));\n consumers.put(\"script\", o -> internal.setScript(Script.parse((Map<String, Object>)o, false, parseFieldMatcher)));\n+ consumers.put(\"version_type\", o -> internal.setVersionType(VersionType.fromString((String) 
o)));\n \n parseInternalRequest(internal, request, consumers);\n ", "filename": "modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java", "status": "modified" }, { "diff": "@@ -91,6 +91,7 @@ protected RequestWrapper<DeleteRequest> buildRequest(SearchHit doc) {\n delete.type(doc.type());\n delete.id(doc.id());\n delete.version(doc.version());\n+ delete.versionType(mainRequest.getVersionType());\n return wrap(delete);\n }\n ", "filename": "modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java", "status": "modified" }, { "diff": "@@ -104,8 +104,8 @@ protected RequestWrapper<IndexRequest> buildRequest(SearchHit doc) {\n index.type(doc.type());\n index.id(doc.id());\n index.source(doc.sourceRef());\n- index.versionType(VersionType.INTERNAL);\n index.version(doc.version());\n+ index.versionType(mainRequest.getVersionType());\n index.setPipeline(mainRequest.getPipeline());\n return wrap(index);\n }", "filename": "modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java", "status": "modified" }, { "diff": "@@ -19,15 +19,18 @@\n \n package org.elasticsearch.index.reindex;\n \n+import org.elasticsearch.action.ActionRequestValidationException;\n import org.elasticsearch.action.CompositeIndicesRequest;\n import org.elasticsearch.action.IndicesRequest;\n import org.elasticsearch.action.index.IndexRequest;\n import org.elasticsearch.action.search.SearchRequest;\n+import org.elasticsearch.index.VersionType;\n \n import java.util.ArrayList;\n import java.util.List;\n \n import static java.util.Collections.unmodifiableList;\n+import static org.elasticsearch.action.ValidateActions.addValidationError;\n \n /**\n * Request to update some documents. That means you can't change their type, id, index, or anything like that. 
This implements\n@@ -36,6 +39,12 @@\n * destination index and things.\n */\n public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest<UpdateByQueryRequest> implements CompositeIndicesRequest {\n+\n+ /**\n+ * Versioning type to set on index requests made by this action.\n+ */\n+ private VersionType versionType = VersionType.INTERNAL;\n+\n /**\n * Ingest pipeline to set on index requests made by this action.\n */\n@@ -62,11 +71,30 @@ public String getPipeline() {\n return pipeline;\n }\n \n+ public VersionType getVersionType() {\n+ return versionType;\n+ }\n+\n+ public void setVersionType(VersionType versionType) {\n+ this.versionType = versionType;\n+ }\n+\n @Override\n protected UpdateByQueryRequest self() {\n return this;\n }\n \n+ @Override\n+ public ActionRequestValidationException validate() {\n+ ActionRequestValidationException e = super.validate();\n+ if (versionType == null) {\n+ e = addValidationError(\"version type is missing\", e);\n+ } else if (versionType != VersionType.INTERNAL && versionType != VersionType.FORCE) {\n+ e = addValidationError(\"version type [\" + versionType.toString() + \"] is not supported\", e);\n+ }\n+ return e;\n+ }\n+\n @Override\n public String toString() {\n StringBuilder b = new StringBuilder();", "filename": "modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java", "status": "modified" }, { "diff": "@@ -0,0 +1,245 @@\n+---\n+\"delete_by_query with various versioning types and version zero\":\n+ - do:\n+ index:\n+ index: index1\n+ type: type1\n+ id: 1\n+ version: 0 # Starting version is zero\n+ version_type: external\n+ body: {\"update\": 0}\n+ - do:\n+ indices.refresh: {}\n+\n+ # Delete by query with default version_type (\"internal\") must fail\n+ # because zero is not allowed as an internal version number\n+ - do:\n+ catch: /illegal version value \\[0\\] for version type \\[INTERNAL\\]./\n+ delete_by_query:\n+ index: index1\n+ refresh: true\n+ body:\n+ query:\n+ match_all: {}\n+ - do:\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - match: {_version: 0}\n+\n+ # Delete by query with explicit version_type \"internal\" must fail (see previous for explanation)\n+ - do:\n+ catch: /illegal version value \\[0\\] for version type \\[INTERNAL\\]./\n+ delete_by_query:\n+ index: index1\n+ refresh: true\n+ body:\n+ version_type: internal\n+ query:\n+ match_all: {}\n+ - do:\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - match: {_version: 0}\n+\n+ # Delete by query with version_type \"external\" is not supported\n+ - do:\n+ catch: /version type \\[EXTERNAL\\] is not supported./\n+ delete_by_query:\n+ index: index1\n+ refresh: true\n+ body:\n+ version_type: external\n+ query:\n+ match_all: {}\n+ - do:\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - match: {_version: 0}\n+\n+ # Delete by query with version_type \"external_gt\" is not supported\n+ - do:\n+ catch: /version type \\[EXTERNAL\\] is not supported./\n+ delete_by_query:\n+ index: index1\n+ refresh: true\n+ body:\n+ version_type: external_gt\n+ query:\n+ match_all: {}\n+ - do:\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - match: {_version: 0}\n+\n+ # Delete by query with version_type of \"external_gte\" is not supported\n+ - do:\n+ catch: /version type \\[EXTERNAL_GTE\\] is not supported./\n+ delete_by_query:\n+ index: index1\n+ refresh: true\n+ body:\n+ version_type: external_gte\n+ query:\n+ match_all: {}\n+ - do:\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - match: {_version: 0}\n+\n+ # Delete by query with version_type 
of \"force\" must succeed\n+ - do:\n+ delete_by_query:\n+ index: index1\n+ refresh: true\n+ body:\n+ version_type: force\n+ query:\n+ match_all: {}\n+ - match: {deleted: 1}\n+ - match: {version_conflicts: 0}\n+\n+ - do:\n+ catch: missing\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - match: {found: false}\n+\n+\n+---\n+\"delete_by_query with various versioning types and value greater than zero\":\n+ - do:\n+ index:\n+ index: index1\n+ type: type1\n+ id: 1\n+ version: 123\n+ version_type: external\n+ body: {\"update\": 0}\n+ - do:\n+ indices.refresh: {}\n+\n+ # Delete by query with default version_type (\"internal\") must succeed\n+ - do:\n+ delete_by_query:\n+ index: index1\n+ refresh: true\n+ body:\n+ query:\n+ match_all: {}\n+ - match: {deleted: 1}\n+ - match: {version_conflicts: 0}\n+\n+ - do:\n+ catch: missing\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - match: {found: false}\n+\n+ # Delete by query with explicit version_type \"internal\" must succeed\n+ - do:\n+ index:\n+ index: index1\n+ type: type1\n+ id: 2\n+ version: 123\n+ version_type: external\n+ body: {\"update\": 0}\n+ - do:\n+ indices.refresh: {}\n+\n+ - do:\n+ delete_by_query:\n+ index: index1\n+ refresh: true\n+ body:\n+ version_type: internal\n+ query:\n+ match_all: {}\n+ - match: {deleted: 1}\n+ - match: {version_conflicts: 0}\n+\n+ - do:\n+ catch: missing\n+ get:\n+ index: index1\n+ type: type1\n+ id: 2\n+ - match: {found: false}\n+\n+ # Delete by query with version_type \"external\" is not supported\n+ - do:\n+ catch: /version type \\[EXTERNAL\\] is not supported./\n+ delete_by_query:\n+ index: index1\n+ refresh: true\n+ body:\n+ version_type: external\n+ query:\n+ match_all: {}\n+\n+ # Delete by query with version_type \"external_gt\" is not supported\n+ - do:\n+ catch: /version type \\[EXTERNAL\\] is not supported./\n+ delete_by_query:\n+ index: index1\n+ refresh: true\n+ body:\n+ version_type: external_gt\n+ query:\n+ match_all: {}\n+\n+ # Delete by query with version_type of \"external_gte\" is not supported\n+ - do:\n+ catch: /version type \\[EXTERNAL_GTE\\] is not supported./\n+ delete_by_query:\n+ index: index1\n+ refresh: true\n+ body:\n+ version_type: external_gte\n+ query:\n+ match_all: {}\n+\n+ # Delete by query with version_type of \"force\" must succeed\n+ - do:\n+ index:\n+ index: index1\n+ type: type1\n+ id: 3\n+ version: 123\n+ version_type: external\n+ body: {\"update\": 0}\n+ - do:\n+ indices.refresh: {}\n+\n+ - do:\n+ delete_by_query:\n+ index: index1\n+ refresh: true\n+ body:\n+ version_type: force\n+ query:\n+ match_all: {}\n+ - match: {deleted: 1}\n+ - match: {version_conflicts: 0}\n+\n+ - do:\n+ catch: missing\n+ get:\n+ index: index1\n+ type: type1\n+ id: 3\n+ - match: {found: false}", "filename": "modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/40_versioning.yaml", "status": "added" }, { "diff": "@@ -21,3 +21,205 @@\n type: test\n id: 1\n - match: {_version: 2}\n+\n+---\n+\"update_by_query with various versioning types and version zero\":\n+ - do:\n+ index:\n+ index: index1\n+ type: type1\n+ id: 1\n+ version: 0 # Starting version is zero\n+ version_type: external\n+ body: {\"update\": 0}\n+ - do:\n+ indices.refresh: {}\n+\n+ # Update by query with default version_type (\"internal\") must fail\n+ # because zero is not allowed as an internal version number\n+ - do:\n+ catch: /illegal version value \\[0\\] for version type \\[INTERNAL\\]./\n+ update_by_query:\n+ index: index1\n+ refresh: true\n+ - do:\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - 
match: {_version: 0}\n+\n+ # Update by query with explicit version_type \"internal\" must fail (see previous for explanation)\n+ - do:\n+ catch: /illegal version value \\[0\\] for version type \\[INTERNAL\\]./\n+ update_by_query:\n+ index: index1\n+ refresh: true\n+ body: {\"version_type\": internal}\n+ - do:\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - match: {_version: 0}\n+\n+ # Update by query with version_type \"external\" is not supported\n+ - do:\n+ catch: /version type \\[EXTERNAL\\] is not supported./\n+ update_by_query:\n+ index: index1\n+ refresh: true\n+ body: {\"version_type\": external}\n+ - do:\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - match: {_version: 0}\n+\n+ # Update by query with version_type \"external_gt\" is not supported\n+ - do:\n+ catch: /version type \\[EXTERNAL\\] is not supported./\n+ update_by_query:\n+ index: index1\n+ refresh: true\n+ body: {\"version_type\": external_gt}\n+ - do:\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - match: {_version: 0}\n+\n+ # Update by query with version_type of \"external_gte\" is not supported\n+ - do:\n+ catch: /version type \\[EXTERNAL_GTE\\] is not supported./\n+ update_by_query:\n+ index: index1\n+ refresh: true\n+ body: {\"version_type\": external_gte}\n+ - do:\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - match: {_version: 0}\n+\n+ # Update by query with version_type of \"force\" must succeed\n+ - do:\n+ update_by_query:\n+ index: index1\n+ refresh: true\n+ body: {\"version_type\": force}\n+ - match: {updated: 1}\n+ - match: {version_conflicts: 0}\n+\n+ - do:\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - match: {_version: 0} # Updated with same version number\n+\n+\n+---\n+\"update_by_query with various versioning types and value greater than zero\":\n+ - do:\n+ index:\n+ index: index1\n+ type: type1\n+ id: 1\n+ version: 123\n+ version_type: external\n+ body: {\"update\": 0}\n+ - do:\n+ indices.refresh: {}\n+\n+ # Update by query with default version_type (\"internal\") must succeed\n+ - do:\n+ update_by_query:\n+ index: index1\n+ refresh: true\n+ - match: {updated: 1}\n+ - match: {version_conflicts: 0}\n+\n+ - do:\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - match: {_version: 124}\n+\n+ # Update by query with explicit version_type \"internal\" must succeed\n+ - do:\n+ update_by_query:\n+ index: index1\n+ refresh: true\n+ body: {\"version_type\": internal}\n+ - match: {updated: 1}\n+ - match: {version_conflicts: 0}\n+\n+ - do:\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - match: {_version: 125}\n+\n+ # Update by query with version_type \"external\" is not supported\n+ - do:\n+ catch: /version type \\[EXTERNAL\\] is not supported./\n+ update_by_query:\n+ index: index1\n+ refresh: true\n+ body: {\"version_type\": external}\n+ - do:\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - match: {_version: 125}\n+\n+ # Update by query with version_type \"external_gt\" is not supported\n+ - do:\n+ catch: /version type \\[EXTERNAL\\] is not supported./\n+ update_by_query:\n+ index: index1\n+ refresh: true\n+ body: {\"version_type\": external_gt}\n+ - do:\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - match: {_version: 125}\n+\n+ # Update by query with version_type of \"external_gte\" is not supported\n+ - do:\n+ catch: /version type \\[EXTERNAL_GTE\\] is not supported./\n+ update_by_query:\n+ index: index1\n+ refresh: true\n+ body: {\"version_type\": external_gte}\n+ - do:\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - match: {_version: 125}\n+\n+ # 
Update by query with version_type of \"force\" must succeed\n+ - do:\n+ update_by_query:\n+ index: index1\n+ refresh: true\n+ body: {\"version_type\": force}\n+ - match: {updated: 1}\n+ - match: {version_conflicts: 0}\n+\n+ - do:\n+ get:\n+ index: index1\n+ type: type1\n+ id: 1\n+ - match: {_version: 125} # Updated with same version number", "filename": "modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/40_versioning.yaml", "status": "modified" }, { "diff": "@@ -168,10 +168,6 @@\n \"type\" : \"boolean\",\n \"description\" : \"Specify whether to return document version as part of a hit\"\n },\n- \"version_type\": {\n- \"type\" : \"boolean\",\n- \"description\" : \"Should the document increment the version number (internal) on hit or not (reindex)\"\n- },\n \"request_cache\": {\n \"type\" : \"boolean\",\n \"description\" : \"Specify if request cache should be used for this request or not, defaults to index level setting\"", "filename": "rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json", "status": "modified" } ] }
{ "body": "Exceptions from scripts are not handled well today:\n- There is a lot of repetitive unnecessary boxing and wrapping of exceptions at multiple layers. \n- Despite this verbosity, no stacktrace information is given by default (!). Users of this are like \"developers\" and need to know _where_ in their script something went wrong.\n- If the user passes the rest parameter (which is very long) to enable stacktrace output, they get a massive verbose toString, containing all kinds of irrelevant ES elements, and formatted as a terribly ugly massive string.\n- ScriptException is meaningless, it doesn't provide any more structure over a RuntimeException!\n- json is the worst possible way to transmit source code. We can count on having no line numbers (which always makes stacktraces meaningful), instead just everything shoved on one massive line.\n\nThe current situation just encourages even more bad exception handling. People complain if you get NullPointerException when you dereference a null pointer, but that is exactly what you deserve if you do this! And it tells you all you need to figure it out: its right there in the stacktrace with line numbers!\n\nWe should just \"box\" the exception a single time, with all the stuff you need to figure out the problem by default. That means telling you what script hit the problem, what language it was in, a \"simple\" reason of what happened, and a relevant \"stacktrace\" to the script. The original root cause is always preserved, if someone asks for the stacktrace or looks at the logs, they will see all that really happened, this is more about the horror of whats happening today at the REST layer.\n\nIn order to contend with \"everything shoved on a single line\", we have to do some highlighting. Normally, line numbers and existing lines work great for code, but since we don't have those, we have to work with what we've got. \n\nFor painless that means, encoding offsets as line numbers into the bytecode, wherever an exception can strike. It also means breaking down the (probably massive) script into sentences that have enough meaningful context: we use leaf S\\* nodes as \"sentences\".\n\nFor expressions it means when a \"variable\" goes wrong (e.g. not in mappings), we just highlight what went wrong with that variable definition, etc.\n\nOther scripting engines can be adapted to use this, e.g. you can get some of this kind of info from at least some groovy/javascript/python exceptions. For now, they still keep using the deprecated \"GeneralScriptException\" and do the same as before. 
If someone else wants to improve them, great.\n\nHere are some example runtime errors:\n\n```\n{\n \"type\" : \"script_exception\",\n \"reason\" : \"runtime error\",\n \"caused_by\" : {\n \"type\" : \"unsupported_operation_exception\",\n \"reason\" : null\n },\n \"script_stack\" : [\n \"java.util.AbstractList.add(AbstractList.java:148)\"\n \"java.util.AbstractList.add(AbstractList.java:108)\"\n \"return x.add(5);\",\n \" ^---- HERE\"\n ],\n \"script\" : \"def x = Collections.emptyList(); return x.add(5);\",\n \"lang\" : \"painless\"\n}\n\n{\n \"type\" : \"script_exception\",\n \"reason\" : \"runtime error\",\n \"caused_by\" : {\n \"type\" : \"null_pointer_exception\",\n \"reason\" : null\n },\n \"script_stack\" : [\n \"x = Math.log(Math.abs(ctx.sometypo.getSomething())); \",\n \" ^---- HERE\"\n ],\n \"script\" : \"double y = ctx.thing; double x = Math.log(Math.abs(ctx.sometypo.getSomething())); return x * 5 + y;\",\n \"lang\" : \"painless\"\n}\n```\n\nIt helps also for compile-time errors (and the \"link time\" of expressions where it binds against the mappings):\n\n```\n{\n \"type\" : \"script_exception\",\n \"reason\" : \"compile error\",\n \"caused_by\" : {\n \"type\" : \"parse_exception\",\n \"reason\" : \"invalid sequence of tokens near '*' on line (1) position (17)\",\n \"caused_by\" : {\n \"type\" : \"no_viable_alt_exception\",\n \"reason\" : null\n }\n },\n \"script_stack\" : [\n \"doc['d'].value * *@#)(@$*@#$ + 4\",\n \" ^---- HERE\"\n ],\n \"script\" : \"doc['d'].value * *@#)(@$*@#$ + 4\",\n \"lang\" : \"expression\"\n}\n\n{\n \"type\" : \"script_exception\",\n \"reason\" : \"link error\",\n \"caused_by\" : {\n \"type\" : \"parse_exception\",\n \"reason\" : \"Field [e] does not exist in mappings\"\n },\n \"script_stack\" : [\n \"doc['e'].value\",\n \" ^---- HERE\"\n ],\n \"script\" : \"doc['e'].value * 5\",\n \"lang\" : \"expression\"\n}\n```\n", "comments": [ { "body": "LGTM. Thanks for making these changes, exceptions just got a whole lot better 👍 ! Left one minor comment.\n", "created_at": "2016-05-26T16:29:33Z" }, { "body": "Looked through the exception related changes and LGTM. I left a nitpick :)\n\nthis is a fantastic improvement! thanks for doing this\n", "created_at": "2016-05-26T18:31:11Z" } ], "number": 18600, "title": "replace ScriptException with a better one" }
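The JSON shown above comes from serializing a single structured exception rather than a chain of wrappers. Below is a rough sketch of how a script engine can build one, using the five-argument `ScriptException` constructor that the painless engine calls in the follow-up change further down; the snippet and pointer rendering here is deliberately simplified and is not the engine's real highlighting logic:

```
import org.elasticsearch.script.ScriptException;

import java.util.Arrays;
import java.util.List;

public class StructuredScriptError {

    /**
     * Box the failure exactly once, carrying the pieces the REST layer renders as
     * "script_stack", "script" and "lang"; the original cause stays attached for the logs.
     */
    public static ScriptException boxOnce(Throwable cause, String source, int offset, String lang) {
        // Simplified highlighting: show the whole source and point at the failing offset.
        StringBuilder pointer = new StringBuilder();
        for (int i = 0; i < offset; i++) {
            pointer.append(' ');
        }
        pointer.append("^---- HERE");
        List<String> scriptStack = Arrays.asList(source, pointer.toString());
        return new ScriptException("runtime error", cause, scriptStack, source, lang);
    }
}
```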
{ "body": "Followup of https://github.com/elastic/elasticsearch/pull/18600\n#18600 only improved painless exceptions for the runtime case. This gives the same format for errors that happen at compile time (lexing, parsing, analysis).\n\nThe goals are the same: don't make exception handling complicated to code and tests, don't wrap exceptions with BS ones, use the correct exceptions.\n\nIn most cases changes just look like this:\n\n```\n- throw new IllegalArgumentException(error(\"Extraneous for loop.\"));\n+ throw createError(new IllegalArgumentException(\"Extraneous for loop.\"));\n```\n\nThe original exception is returned, but with an artificial stack frame added, which looks just like the runtime case. This allows for more consistency (maybe more code sharing in the future too). \n", "number": 18711, "review_comments": [], "title": "Improve painless compile-time exceptions" }
{ "commits": [ { "message": "Improve painless compile-time exceptions" } ], "files": [ { "diff": "@@ -22,14 +22,15 @@\n import org.elasticsearch.painless.Definition.Cast;\n import org.elasticsearch.painless.Definition.Sort;\n import org.elasticsearch.painless.Definition.Type;\n+import org.elasticsearch.painless.node.ANode;\n \n /**\n * Used during the analysis phase to collect legal type casts and promotions\n * for type-checking and later to write necessary casts in the bytecode.\n */\n public final class AnalyzerCaster {\n \n- public static Cast getLegalCast(String location, Type actual, Type expected, boolean explicit, boolean internal) {\n+ public static Cast getLegalCast(Location location, Type actual, Type expected, boolean explicit, boolean internal) {\n if (actual.equals(expected)) {\n return null;\n }\n@@ -653,11 +654,11 @@ public static Cast getLegalCast(String location, Type actual, Type expected, boo\n explicit && actual.clazz.isAssignableFrom(expected.clazz)) {\n return new Cast(actual, expected, explicit);\n } else {\n- throw new ClassCastException(\"Error\" + location + \": Cannot cast from [\" + actual.name + \"] to [\" + expected.name + \"].\");\n+ throw location.createError(new ClassCastException(\"Cannot cast from [\" + actual.name + \"] to [\" + expected.name + \"].\"));\n }\n }\n \n- public static Object constCast(final String location, final Object constant, final Cast cast) {\n+ public static Object constCast(Location location, final Object constant, final Cast cast) {\n final Sort fsort = cast.from.sort;\n final Sort tsort = cast.to.sort;\n \n@@ -685,12 +686,12 @@ public static Object constCast(final String location, final Object constant, fin\n case FLOAT: return number.floatValue();\n case DOUBLE: return number.doubleValue();\n default:\n- throw new IllegalStateException(\"Error\" + location + \": Cannot cast from \" +\n- \"[\" + cast.from.clazz.getCanonicalName() + \"] to [\" + cast.to.clazz.getCanonicalName() + \"].\");\n+ throw location.createError(new IllegalStateException(\"Cannot cast from \" +\n+ \"[\" + cast.from.clazz.getCanonicalName() + \"] to [\" + cast.to.clazz.getCanonicalName() + \"].\"));\n }\n } else {\n- throw new IllegalStateException(\"Error\" + location + \": Cannot cast from \" +\n- \"[\" + cast.from.clazz.getCanonicalName() + \"] to [\" + cast.to.clazz.getCanonicalName() + \"].\");\n+ throw location.createError(new IllegalStateException(\"Cannot cast from \" +\n+ \"[\" + cast.from.clazz.getCanonicalName() + \"] to [\" + cast.to.clazz.getCanonicalName() + \"].\"));\n }\n }\n ", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java", "status": "modified" }, { "diff": "@@ -101,7 +101,7 @@ static Executable compile(Loader loader, String name, String source, CompilerSet\n }\n \n Reserved reserved = new Reserved();\n- SSource root = Walker.buildPainlessTree(source, reserved, settings);\n+ SSource root = Walker.buildPainlessTree(name, source, reserved, settings);\n Variables variables = Analyzer.analyze(reserved, root);\n BitSet expressions = new BitSet(source.length());\n \n@@ -132,7 +132,7 @@ static byte[] compile(String name, String source, CompilerSettings settings) {\n }\n \n Reserved reserved = new Reserved();\n- SSource root = Walker.buildPainlessTree(source, reserved, settings);\n+ SSource root = Walker.buildPainlessTree(name, source, reserved, settings);\n Variables variables = Analyzer.analyze(reserved, root);\n \n return Writer.write(settings, name, source, variables, root, new 
BitSet(source.length()));", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java", "status": "modified" }, { "diff": "@@ -164,6 +164,11 @@ public int hashCode() {\n \n return result;\n }\n+\n+ @Override\n+ public String toString() {\n+ return name;\n+ }\n }\n \n public static final class Constructor {", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java", "status": "modified" }, { "diff": "@@ -0,0 +1,111 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.painless;\n+\n+import java.util.Objects;\n+\n+/**\n+ * Represents a location in script code (name of script + character offset)\n+ */\n+public final class Location {\n+ private final String sourceName;\n+ private final int offset;\n+ \n+ /**\n+ * Create a new Location \n+ * @param sourceName script's name\n+ * @param offset character offset of script element\n+ */\n+ public Location(String sourceName, int offset) {\n+ this.sourceName = Objects.requireNonNull(sourceName);\n+ this.offset = offset;\n+ }\n+ \n+ /**\n+ * Return the script's name\n+ */\n+ public String getSourceName() {\n+ return sourceName;\n+ }\n+\n+ /**\n+ * Return the character offset\n+ */\n+ public int getOffset() {\n+ return offset;\n+ }\n+\n+ /**\n+ * Augments an exception with this location's information.\n+ */\n+ public RuntimeException createError(RuntimeException exception) {\n+ StackTraceElement element = new StackTraceElement(WriterConstants.CLASS_NAME, \"compile\", sourceName, offset + 1);\n+ StackTraceElement[] oldStack = exception.getStackTrace();\n+ StackTraceElement[] newStack = new StackTraceElement[oldStack.length + 1];\n+ System.arraycopy(oldStack, 0, newStack, 1, oldStack.length);\n+ newStack[0] = element;\n+ exception.setStackTrace(newStack);\n+ assert exception.getStackTrace().length == newStack.length : \"non-writeable stacktrace for exception: \" + exception.getClass();\n+ return exception;\n+ }\n+\n+ // This maximum length is theoretically 65535 bytes, but as it's CESU-8 encoded we dont know how large it is in bytes, so be safe\n+ private static final int MAX_NAME_LENGTH = 256;\n+ \n+ /** Computes the file name (mostly important for stacktraces) */\n+ public static String computeSourceName(String scriptName, String source) {\n+ StringBuilder fileName = new StringBuilder();\n+ if (scriptName.equals(PainlessScriptEngineService.INLINE_NAME)) {\n+ // its an anonymous script, include at least a portion of the source to help identify which one it is\n+ // but don't create stacktraces with filenames that contain newlines or huge names.\n+\n+ // truncate to the first newline\n+ int limit = source.indexOf('\\n');\n+ if (limit >= 0) {\n+ int limit2 = source.indexOf('\\r');\n+ if (limit2 >= 0) 
{\n+ limit = Math.min(limit, limit2);\n+ }\n+ } else {\n+ limit = source.length();\n+ }\n+\n+ // truncate to our limit\n+ limit = Math.min(limit, MAX_NAME_LENGTH);\n+ fileName.append(source, 0, limit);\n+\n+ // if we truncated, make it obvious\n+ if (limit != source.length()) {\n+ fileName.append(\" ...\");\n+ }\n+ fileName.append(\" @ <inline script>\");\n+ } else {\n+ // its a named script, just use the name\n+ // but don't trust this has a reasonable length!\n+ if (scriptName.length() > MAX_NAME_LENGTH) {\n+ fileName.append(scriptName, 0, MAX_NAME_LENGTH);\n+ fileName.append(\" ...\");\n+ } else {\n+ fileName.append(scriptName);\n+ }\n+ }\n+ return fileName.toString();\n+ }\n+}", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/Location.java", "status": "added" }, { "diff": "@@ -114,7 +114,8 @@ private static String[] getInternalNames(final org.objectweb.asm.Type[] types) {\n * <p>\n * This is invoked for each statement boundary (leaf {@code S*} nodes).\n */\n- public void writeStatementOffset(int offset) {\n+ public void writeStatementOffset(Location location) {\n+ int offset = location.getOffset();\n // ensure we don't have duplicate stuff going in here. can catch bugs\n // (e.g. nodes get assigned wrong offsets by antlr walker)\n assert statements.get(offset) == false;\n@@ -126,16 +127,16 @@ public void writeStatementOffset(int offset) {\n * <p>\n * This is invoked before instructions that can hit exceptions.\n */\n- public void writeDebugInfo(int offset) {\n+ public void writeDebugInfo(Location location) {\n // TODO: maybe track these in bitsets too? this is trickier...\n Label label = new Label();\n visitLabel(label);\n- visitLineNumber(offset + 1, label);\n+ visitLineNumber(location.getOffset() + 1, label);\n }\n \n- public void writeLoopCounter(int slot, int count, int offset) {\n+ public void writeLoopCounter(int slot, int count, Location location) {\n if (slot > -1) {\n- writeDebugInfo(offset);\n+ writeDebugInfo(location);\n final Label end = new Label();\n \n iinc(slot, -count);\n@@ -281,14 +282,14 @@ public void writeToStrings() {\n }\n }\n \n- public void writeBinaryInstruction(final String location, final Type type, final Operation operation) {\n+ public void writeBinaryInstruction(Location location, Type type, Operation operation) {\n final Sort sort = type.sort;\n \n if ((sort == Sort.FLOAT || sort == Sort.DOUBLE) &&\n (operation == Operation.LSH || operation == Operation.USH ||\n operation == Operation.RSH || operation == Operation.BWAND ||\n operation == Operation.XOR || operation == Operation.BWOR)) {\n- throw new IllegalStateException(\"Error \" + location + \": Illegal tree structure.\");\n+ throw location.createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n if (sort == Sort.DEF) {\n@@ -305,7 +306,7 @@ public void writeBinaryInstruction(final String location, final Type type, final\n case XOR: invokeStatic(DEF_UTIL_TYPE, DEF_XOR_CALL); break;\n case BWOR: invokeStatic(DEF_UTIL_TYPE, DEF_OR_CALL); break;\n default:\n- throw new IllegalStateException(\"Error \" + location + \": Illegal tree structure.\");\n+ throw location.createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n } else {\n switch (operation) {\n@@ -321,7 +322,7 @@ public void writeBinaryInstruction(final String location, final Type type, final\n case XOR: math(GeneratorAdapter.XOR, type.type); break;\n case BWOR: math(GeneratorAdapter.OR, type.type); break;\n default:\n- throw new IllegalStateException(\"Error \" + location + \": 
Illegal tree structure.\");\n+ throw location.createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n }", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java", "status": "modified" }, { "diff": "@@ -29,6 +29,7 @@\n import org.elasticsearch.script.ExecutableScript;\n import org.elasticsearch.script.LeafSearchScript;\n import org.elasticsearch.script.ScriptEngineService;\n+import org.elasticsearch.script.ScriptException;\n import org.elasticsearch.script.SearchScript;\n import org.elasticsearch.search.lookup.SearchLookup;\n \n@@ -38,7 +39,10 @@\n import java.security.Permissions;\n import java.security.PrivilegedAction;\n import java.security.ProtectionDomain;\n+import java.util.ArrayList;\n+import java.util.Collections;\n import java.util.HashMap;\n+import java.util.List;\n import java.util.Map;\n \n /**\n@@ -147,13 +151,17 @@ public Loader run() {\n }\n });\n \n- // Drop all permissions to actually compile the code itself.\n- return AccessController.doPrivileged(new PrivilegedAction<Executable>() {\n- @Override\n- public Executable run() {\n- return Compiler.compile(loader, scriptName == null ? INLINE_NAME : scriptName, scriptSource, compilerSettings);\n- }\n- }, COMPILATION_CONTEXT);\n+ try {\n+ // Drop all permissions to actually compile the code itself.\n+ return AccessController.doPrivileged(new PrivilegedAction<Executable>() {\n+ @Override\n+ public Executable run() {\n+ return Compiler.compile(loader, scriptName == null ? INLINE_NAME : scriptName, scriptSource, compilerSettings);\n+ }\n+ }, COMPILATION_CONTEXT);\n+ } catch (Exception e) {\n+ throw convertToScriptException(scriptName == null ? scriptSource : scriptName, scriptSource, e);\n+ }\n }\n \n /**\n@@ -213,4 +221,51 @@ public void scriptRemoved(final CompiledScript script) {\n public void close() {\n // Nothing to do.\n }\n+ \n+ private ScriptException convertToScriptException(String scriptName, String scriptSource, Throwable t) {\n+ // create a script stack: this is just the script portion\n+ List<String> scriptStack = new ArrayList<>();\n+ for (StackTraceElement element : t.getStackTrace()) {\n+ if (WriterConstants.CLASS_NAME.equals(element.getClassName())) {\n+ // found the script portion\n+ int offset = element.getLineNumber();\n+ if (offset == -1) {\n+ scriptStack.add(\"<<< unknown portion of script >>>\");\n+ } else {\n+ offset--; // offset is 1 based, line numbers must be!\n+ int startOffset = getPreviousStatement(scriptSource, offset);\n+ int endOffset = getNextStatement(scriptSource, offset);\n+ StringBuilder snippet = new StringBuilder();\n+ if (startOffset > 0) {\n+ snippet.append(\"... \");\n+ }\n+ snippet.append(scriptSource.substring(startOffset, endOffset));\n+ if (endOffset < scriptSource.length()) {\n+ snippet.append(\" ...\");\n+ }\n+ scriptStack.add(snippet.toString());\n+ StringBuilder pointer = new StringBuilder();\n+ if (startOffset > 0) {\n+ pointer.append(\" \");\n+ }\n+ for (int i = startOffset; i < offset; i++) {\n+ pointer.append(' ');\n+ }\n+ pointer.append(\"^---- HERE\");\n+ scriptStack.add(pointer.toString());\n+ }\n+ break;\n+ }\n+ }\n+ throw new ScriptException(\"compile error\", t, scriptStack, scriptSource, PainlessScriptEngineService.NAME);\n+ }\n+ \n+ // very simple heuristic: +/- 25 chars. 
can be improved later.\n+ private int getPreviousStatement(String scriptSource, int offset) {\n+ return Math.max(0, offset - 25);\n+ }\n+ \n+ private int getNextStatement(String scriptSource, int offset) {\n+ return Math.min(scriptSource.length(), offset + 25);\n+ }\n }", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngineService.java", "status": "modified" }, { "diff": "@@ -173,7 +173,8 @@ private ScriptException convertToScriptException(Throwable t) {\n /** returns true for methods that are part of the runtime */\n private static boolean shouldFilter(StackTraceElement element) {\n return element.getClassName().startsWith(\"org.elasticsearch.painless.\") ||\n- element.getClassName().startsWith(\"java.lang.invoke.\");\n+ element.getClassName().startsWith(\"java.lang.invoke.\") ||\n+ element.getClassName().startsWith(\"sun.invoke.\");\n }\n \n /**", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptImpl.java", "status": "modified" }, { "diff": "@@ -69,15 +69,15 @@ public void usesLoop() {\n }\n \n public static final class Variable {\n- public final String location;\n+ public final Location location;\n public final String name;\n public final Type type;\n public final int slot;\n public final boolean readonly;\n \n public boolean read = false;\n \n- private Variable(String location, String name, Type type, int slot, boolean readonly) {\n+ private Variable(Location location, String name, Type type, int slot, boolean readonly) {\n this.location = location;\n this.name = name;\n this.type = type;\n@@ -88,6 +88,7 @@ private Variable(String location, String name, Type type, int slot, boolean read\n \n final Reserved reserved;\n \n+ // TODO: this datastructure runs in linear time for nearly all operations. use linkedhashset instead?\n private final Deque<Integer> scopes = new ArrayDeque<>();\n private final Deque<Variable> variables = new ArrayDeque<>();\n \n@@ -99,35 +100,35 @@ public Variables(Reserved reserved) {\n // Method variables.\n \n // This reference. Internal use only.\n- addVariable(\"[\" + Reserved.THIS + \"]\", Definition.getType(\"Object\"), Reserved.THIS, true, true);\n+ addVariable(null, Definition.getType(\"Object\"), Reserved.THIS, true, true);\n \n // Input map of variables passed to the script.\n- addVariable(\"[\" + Reserved.PARAMS + \"]\", Definition.getType(\"Map\"), Reserved.PARAMS, true, true);\n+ addVariable(null, Definition.getType(\"Map\"), Reserved.PARAMS, true, true);\n \n // Scorer parameter passed to the script. Internal use only.\n- addVariable(\"[\" + Reserved.SCORER + \"]\", Definition.DEF_TYPE, Reserved.SCORER, true, true);\n+ addVariable(null, Definition.DEF_TYPE, Reserved.SCORER, true, true);\n \n // Doc parameter passed to the script. 
TODO: Currently working as a Map, we can do better?\n- addVariable(\"[\" + Reserved.DOC + \"]\", Definition.getType(\"Map\"), Reserved.DOC, true, true);\n+ addVariable(null, Definition.getType(\"Map\"), Reserved.DOC, true, true);\n \n // Aggregation _value parameter passed to the script.\n- addVariable(\"[\" + Reserved.VALUE + \"]\", Definition.DEF_TYPE, Reserved.VALUE, true, true);\n+ addVariable(null, Definition.DEF_TYPE, Reserved.VALUE, true, true);\n \n // Shortcut variables.\n \n // Document's score as a read-only double.\n if (reserved.score) {\n- addVariable(\"[\" + Reserved.SCORE + \"]\", Definition.DOUBLE_TYPE, Reserved.SCORE, true, true);\n+ addVariable(null, Definition.DOUBLE_TYPE, Reserved.SCORE, true, true);\n }\n \n // The ctx map set by executable scripts as a read-only map.\n if (reserved.ctx) {\n- addVariable(\"[\" + Reserved.CTX + \"]\", Definition.getType(\"Map\"), Reserved.CTX, true, true);\n+ addVariable(null, Definition.getType(\"Map\"), Reserved.CTX, true, true);\n }\n \n // Loop counter to catch infinite loops. Internal use only.\n if (reserved.loop) {\n- addVariable(\"[\" + Reserved.LOOP + \"]\", Definition.INT_TYPE, Reserved.LOOP, true, true);\n+ addVariable(null, Definition.INT_TYPE, Reserved.LOOP, true, true);\n }\n }\n \n@@ -137,19 +138,20 @@ public void incrementScope() {\n \n public void decrementScope() {\n int remove = scopes.pop();\n-\n+ \n while (remove > 0) {\n- Variable variable = variables.pop();\n-\n+ Variable variable = variables.pop();\n+ \n+ // TODO: is this working? the code reads backwards...\n if (variable.read) {\n- throw new IllegalArgumentException(\"Error [\" + variable.location + \"]: Variable [\" + variable.name + \"] never used.\");\n+ throw variable.location.createError(new IllegalArgumentException(\"Variable [\" + variable.name + \"] never used.\"));\n }\n-\n+ \n --remove;\n }\n }\n \n- public Variable getVariable(String location, String name) {\n+ public Variable getVariable(Location location, String name) {\n Iterator<Variable> itr = variables.iterator();\n \n while (itr.hasNext()) {\n@@ -160,20 +162,20 @@ public Variable getVariable(String location, String name) {\n }\n }\n \n- if (location != null) {\n- throw new IllegalArgumentException(\"Error \" + location + \": Variable [\" + name + \"] not defined.\");\n- }\n+ throw location.createError(new IllegalArgumentException(\"Variable [\" + name + \"] not defined.\"));\n+ }\n \n- return null;\n+ private boolean variableExists(String name) {\n+ return variables.contains(name);\n }\n \n- public Variable addVariable(String location, Type type, String name, boolean readonly, boolean reserved) {\n+ public Variable addVariable(Location location, Type type, String name, boolean readonly, boolean reserved) {\n if (!reserved && this.reserved.isReserved(name)) {\n- throw new IllegalArgumentException(\"Error \" + location + \": Variable name [\" + name + \"] is reserved.\");\n+ throw location.createError(new IllegalArgumentException(\"Variable name [\" + name + \"] is reserved.\"));\n }\n \n- if (getVariable(null, name) != null) {\n- throw new IllegalArgumentException(\"Error \" + location + \": Variable name [\" + name + \"] already defined.\");\n+ if (variableExists(name)) {\n+ throw new IllegalArgumentException(\"Variable name [\" + name + \"] already defined.\");\n }\n \n try {", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/Variables.java", "status": "modified" }, { "diff": "@@ -72,9 +72,6 @@ private Writer(CompilerSettings settings, String name, String source, 
Variables\n writeEnd();\n }\n \n- // This maximum length is theoretically 65535 bytes, but as it's CESU-8 encoded we dont know how large it is in bytes, so be safe\n- private static final int MAX_NAME_LENGTH = 256;\n-\n private void writeBegin() {\n final int version = Opcodes.V1_8;\n final int access = Opcodes.ACC_PUBLIC | Opcodes.ACC_SUPER | Opcodes.ACC_FINAL;\n@@ -86,47 +83,7 @@ private void writeBegin() {\n new String[] { WriterConstants.NEEDS_SCORE_TYPE.getInternalName() } : null;\n \n writer.visit(version, access, name, null, base, interfaces);\n- writer.visitSource(computeSourceName(), null);\n- }\n-\n- /** Computes the file name (mostly important for stacktraces) */\n- private String computeSourceName() {\n- StringBuilder fileName = new StringBuilder();\n- if (scriptName.equals(PainlessScriptEngineService.INLINE_NAME)) {\n- // its an anonymous script, include at least a portion of the source to help identify which one it is\n- // but don't create stacktraces with filenames that contain newlines or huge names.\n-\n- // truncate to the first newline\n- int limit = source.indexOf('\\n');\n- if (limit >= 0) {\n- int limit2 = source.indexOf('\\r');\n- if (limit2 >= 0) {\n- limit = Math.min(limit, limit2);\n- }\n- } else {\n- limit = source.length();\n- }\n-\n- // truncate to our limit\n- limit = Math.min(limit, MAX_NAME_LENGTH);\n- fileName.append(source, 0, limit);\n-\n- // if we truncated, make it obvious\n- if (limit != source.length()) {\n- fileName.append(\" ...\");\n- }\n- fileName.append(\" @ <inline script>\");\n- } else {\n- // its a named script, just use the name\n- // but don't trust this has a reasonable length!\n- if (scriptName.length() > MAX_NAME_LENGTH) {\n- fileName.append(scriptName, 0, MAX_NAME_LENGTH);\n- fileName.append(\" ...\");\n- } else {\n- fileName.append(scriptName);\n- }\n- }\n- return fileName.toString();\n+ writer.visitSource(Location.computeSourceName(scriptName,source), null);\n }\n \n private void writeConstructor() {", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/Writer.java", "status": "modified" }, { "diff": "@@ -22,16 +22,17 @@\n import org.antlr.v4.runtime.CharStream;\n import org.antlr.v4.runtime.LexerNoViableAltException;\n import org.antlr.v4.runtime.misc.Interval;\n-\n-import java.text.ParseException;\n+import org.elasticsearch.painless.Location;\n \n /**\n * A lexer that will override the default error behavior to fail on the first error.\n */\n final class ErrorHandlingLexer extends PainlessLexer {\n+ final String sourceName;\n \n- ErrorHandlingLexer(final CharStream charStream) {\n+ ErrorHandlingLexer(CharStream charStream, String sourceName) {\n super(charStream);\n+ this.sourceName = sourceName;\n }\n \n @Override\n@@ -40,11 +41,7 @@ public void recover(final LexerNoViableAltException lnvae) {\n final int startIndex = lnvae.getStartIndex();\n final String text = charStream.getText(Interval.of(startIndex, charStream.index()));\n \n- final ParseException parseException = new ParseException(\"Error [\" + _tokenStartLine + \":\" +\n- _tokenStartCharPositionInLine + \"]: unexpected character [\" +\n- getErrorDisplay(text) + \"].\", _tokenStartCharIndex);\n- parseException.initCause(lnvae);\n-\n- throw new RuntimeException(parseException);\n+ Location location = new Location(sourceName, _tokenStartCharIndex);\n+ throw location.createError(new IllegalArgumentException(\"unexpected character [\" + getErrorDisplay(text) + \"].\", lnvae));\n }\n }", "filename": 
"modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/ErrorHandlingLexer.java", "status": "modified" }, { "diff": "@@ -25,52 +25,50 @@\n import org.antlr.v4.runtime.Parser;\n import org.antlr.v4.runtime.RecognitionException;\n import org.antlr.v4.runtime.Token;\n-\n-import java.text.ParseException;\n+import org.elasticsearch.painless.Location;\n \n /**\n * An error strategy that will override the default error behavior to fail on the first parser error.\n */\n final class ParserErrorStrategy extends DefaultErrorStrategy {\n+ final String sourceName;\n+ \n+ ParserErrorStrategy(String sourceName) {\n+ this.sourceName = sourceName;\n+ }\n \n @Override\n public void recover(final Parser recognizer, final RecognitionException re) {\n final Token token = re.getOffendingToken();\n String message;\n \n if (token == null) {\n- message = \"Error: no parse token found.\";\n+ message = \"no parse token found.\";\n } else if (re instanceof InputMismatchException) {\n- message = \"Error[\" + token.getLine() + \":\" + token.getCharPositionInLine() + \"]:\" +\n- \" unexpected token [\" + getTokenErrorDisplay(token) + \"]\" +\n+ message = \"unexpected token [\" + getTokenErrorDisplay(token) + \"]\" +\n \" was expecting one of [\" + re.getExpectedTokens().toString(recognizer.getVocabulary()) + \"].\";\n } else if (re instanceof NoViableAltException) {\n if (token.getType() == PainlessParser.EOF) {\n- message = \"Error: unexpected end of script.\";\n+ message = \"unexpected end of script.\";\n } else {\n- message = \"Error[\" + token.getLine() + \":\" + token.getCharPositionInLine() + \"]:\" +\n- \"invalid sequence of tokens near [\" + getTokenErrorDisplay(token) + \"].\";\n+ message = \"invalid sequence of tokens near [\" + getTokenErrorDisplay(token) + \"].\";\n }\n } else {\n- message = \"Error[\" + token.getLine() + \":\" + token.getCharPositionInLine() + \"]:\" +\n- \" unexpected token near [\" + getTokenErrorDisplay(token) + \"].\";\n+ message = \"unexpected token near [\" + getTokenErrorDisplay(token) + \"].\";\n }\n \n- final ParseException parseException = new ParseException(message, token == null ? -1 : token.getStartIndex());\n- parseException.initCause(re);\n-\n- throw new RuntimeException(parseException);\n+ Location location = new Location(sourceName, token == null ? 
-1 : token.getStartIndex());\n+ throw location.createError(new IllegalArgumentException(message, re));\n }\n \n @Override\n public Token recoverInline(final Parser recognizer) throws RecognitionException {\n final Token token = recognizer.getCurrentToken();\n- final String message = \"Error[\" + token.getLine() + \":\" + token.getCharPositionInLine() + \"]:\" +\n- \" unexpected token [\" + getTokenErrorDisplay(token) + \"]\" +\n+ final String message = \"unexpected token [\" + getTokenErrorDisplay(token) + \"]\" +\n \" was expecting one of [\" + recognizer.getExpectedTokens().toString(recognizer.getVocabulary()) + \"].\";\n- final ParseException parseException = new ParseException(message, token.getStartIndex());\n \n- throw new RuntimeException(parseException);\n+ Location location = new Location(sourceName, token.getStartIndex());\n+ throw location.createError(new IllegalArgumentException(message));\n }\n \n @Override", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/ParserErrorStrategy.java", "status": "modified" }, { "diff": "@@ -28,6 +28,7 @@\n import org.antlr.v4.runtime.Recognizer;\n import org.antlr.v4.runtime.atn.PredictionMode;\n import org.elasticsearch.painless.CompilerSettings;\n+import org.elasticsearch.painless.Location;\n import org.elasticsearch.painless.Operation;\n import org.elasticsearch.painless.Variables.Reserved;\n import org.elasticsearch.painless.antlr.PainlessParser.AfterthoughtContext;\n@@ -135,25 +136,27 @@\n */\n public final class Walker extends PainlessParserBaseVisitor<Object> {\n \n- public static SSource buildPainlessTree(String source, Reserved reserved, CompilerSettings settings) {\n- return new Walker(source, reserved, settings).source;\n+ public static SSource buildPainlessTree(String name, String sourceText, Reserved reserved, CompilerSettings settings) {\n+ return new Walker(name, sourceText, reserved, settings).source;\n }\n \n private final Reserved reserved;\n private final SSource source;\n private final CompilerSettings settings;\n+ private final String sourceName;\n \n- private Walker(String source, Reserved reserved, CompilerSettings settings) {\n+ private Walker(String name, String sourceText, Reserved reserved, CompilerSettings settings) {\n this.reserved = reserved;\n this.settings = settings;\n- this.source = (SSource)visit(buildAntlrTree(source));\n+ this.sourceName = Location.computeSourceName(name, sourceText);\n+ this.source = (SSource)visit(buildAntlrTree(sourceText));\n }\n \n private SourceContext buildAntlrTree(String source) {\n ANTLRInputStream stream = new ANTLRInputStream(source);\n- PainlessLexer lexer = new ErrorHandlingLexer(stream);\n+ PainlessLexer lexer = new ErrorHandlingLexer(stream, sourceName);\n PainlessParser parser = new PainlessParser(new CommonTokenStream(lexer));\n- ParserErrorStrategy strategy = new ParserErrorStrategy();\n+ ParserErrorStrategy strategy = new ParserErrorStrategy(sourceName);\n \n lexer.removeErrorListeners();\n parser.removeErrorListeners();\n@@ -185,16 +188,8 @@ public void syntaxError(final Recognizer<?,?> recognizer, final Object offending\n parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);\n }\n \n- private int line(ParserRuleContext ctx) {\n- return ctx.getStart().getLine();\n- }\n-\n- private int offset(ParserRuleContext ctx) {\n- return ctx.getStart().getStartIndex();\n- }\n-\n- private String location(ParserRuleContext ctx) {\n- return \"[ \" + ctx.getStart().getLine() + \" : \" + ctx.getStart().getCharPositionInLine() 
+ \" ]\";\n+ private Location location(ParserRuleContext ctx) {\n+ return new Location(sourceName, ctx.getStart().getStartIndex());\n }\n \n @Override\n@@ -205,7 +200,7 @@ public Object visitSource(SourceContext ctx) {\n statements.add((AStatement)visit(statement));\n }\n \n- return new SSource(line(ctx), offset(ctx), location(ctx), statements);\n+ return new SSource(location(ctx), statements);\n }\n \n @Override\n@@ -216,9 +211,9 @@ public Object visitIf(IfContext ctx) {\n if (ctx.trailer().size() > 1) {\n SBlock elseblock = (SBlock)visit(ctx.trailer(1));\n \n- return new SIfElse(line(ctx), offset(ctx), location(ctx), expression, ifblock, elseblock);\n+ return new SIfElse(location(ctx), expression, ifblock, elseblock);\n } else {\n- return new SIf(line(ctx), offset(ctx), location(ctx), expression, ifblock);\n+ return new SIf(location(ctx), expression, ifblock);\n }\n }\n \n@@ -233,11 +228,11 @@ public Object visitWhile(WhileContext ctx) {\n if (ctx.trailer() != null) {\n SBlock block = (SBlock)visit(ctx.trailer());\n \n- return new SWhile(line(ctx), offset(ctx), location(ctx), settings.getMaxLoopCounter(), expression, block);\n+ return new SWhile(location(ctx), settings.getMaxLoopCounter(), expression, block);\n } else if (ctx.empty() != null) {\n- return new SWhile(line(ctx), offset(ctx), location(ctx), settings.getMaxLoopCounter(), expression, null);\n+ return new SWhile(location(ctx), settings.getMaxLoopCounter(), expression, null);\n } else {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \" Illegal tree structure.\");\n+ throw location(ctx).createError(new IllegalStateException(\" Illegal tree structure.\"));\n }\n }\n \n@@ -250,7 +245,7 @@ public Object visitDo(DoContext ctx) {\n AExpression expression = (AExpression)visitExpression(ctx.expression());\n SBlock block = (SBlock)visit(ctx.block());\n \n- return new SDo(line(ctx), offset(ctx), location(ctx), settings.getMaxLoopCounter(), block, expression);\n+ return new SDo(location(ctx), settings.getMaxLoopCounter(), block, expression);\n }\n \n @Override\n@@ -266,13 +261,13 @@ public Object visitFor(ForContext ctx) {\n if (ctx.trailer() != null) {\n SBlock block = (SBlock)visit(ctx.trailer());\n \n- return new SFor(line(ctx), offset(ctx), location(ctx),\n+ return new SFor(location(ctx),\n settings.getMaxLoopCounter(), initializer, expression, afterthought, block);\n } else if (ctx.empty() != null) {\n- return new SFor(line(ctx), offset(ctx), location(ctx),\n+ return new SFor(location(ctx),\n settings.getMaxLoopCounter(), initializer, expression, afterthought, null);\n } else {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \" Illegal tree structure.\");\n+ throw location(ctx).createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -283,19 +278,19 @@ public Object visitDecl(DeclContext ctx) {\n \n @Override\n public Object visitContinue(ContinueContext ctx) {\n- return new SContinue(line(ctx), offset(ctx), location(ctx));\n+ return new SContinue(location(ctx));\n }\n \n @Override\n public Object visitBreak(BreakContext ctx) {\n- return new SBreak(line(ctx), offset(ctx), location(ctx));\n+ return new SBreak(location(ctx));\n }\n \n @Override\n public Object visitReturn(ReturnContext ctx) {\n AExpression expression = (AExpression)visitExpression(ctx.expression());\n \n- return new SReturn(line(ctx), offset(ctx), location(ctx), expression);\n+ return new SReturn(location(ctx), expression);\n }\n \n @Override\n@@ -307,21 +302,21 @@ public Object visitTry(TryContext ctx) {\n 
catches.add((SCatch)visit(trap));\n }\n \n- return new STry(line(ctx), offset(ctx), location(ctx), block, catches);\n+ return new STry(location(ctx), block, catches);\n }\n \n @Override\n public Object visitThrow(ThrowContext ctx) {\n AExpression expression = (AExpression)visitExpression(ctx.expression());\n \n- return new SThrow(line(ctx), offset(ctx), location(ctx), expression);\n+ return new SThrow(location(ctx), expression);\n }\n \n @Override\n public Object visitExpr(ExprContext ctx) {\n AExpression expression = (AExpression)visitExpression(ctx.expression());\n \n- return new SExpression(line(ctx), offset(ctx), location(ctx), expression);\n+ return new SExpression(location(ctx), expression);\n }\n \n @Override\n@@ -332,9 +327,9 @@ public Object visitTrailer(TrailerContext ctx) {\n List<AStatement> statements = new ArrayList<>();\n statements.add((AStatement)visit(ctx.statement()));\n \n- return new SBlock(line(ctx), offset(ctx), location(ctx), statements);\n+ return new SBlock(location(ctx), statements);\n } else {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \" Illegal tree structure.\");\n+ throw location(ctx).createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -349,13 +344,13 @@ public Object visitBlock(BlockContext ctx) {\n statements.add((AStatement)visit(statement));\n }\n \n- return new SBlock(line(ctx), offset(ctx), location(ctx), statements);\n+ return new SBlock(location(ctx), statements);\n }\n }\n \n @Override\n public Object visitEmpty(EmptyContext ctx) {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \" Illegal tree structure.\");\n+ throw location(ctx).createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n @Override\n@@ -365,7 +360,7 @@ public Object visitInitializer(InitializerContext ctx) {\n } else if (ctx.expression() != null) {\n return visitExpression(ctx.expression());\n } else {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \" Illegal tree structure.\");\n+ throw location(ctx).createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -383,25 +378,25 @@ public Object visitDeclaration(DeclarationContext ctx) {\n String name = declvar.ID().getText();\n AExpression expression = declvar.expression() == null ? 
null : (AExpression)visitExpression(declvar.expression());\n \n- declarations.add(new SDeclaration(line(declvar), offset(declvar), location(declvar), type, name, expression));\n+ declarations.add(new SDeclaration(location(declvar), type, name, expression));\n }\n \n- return new SDeclBlock(line(ctx), offset(ctx), location(ctx), declarations);\n+ return new SDeclBlock(location(ctx), declarations);\n }\n \n @Override\n public Object visitDecltype(DecltypeContext ctx) {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \" Illegal tree structure.\");\n+ throw location(ctx).createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n @Override\n public Object visitFuncref(FuncrefContext ctx) {\n- return new EFunctionRef(line(ctx), offset(ctx), location(ctx), ctx.TYPE().getText(), ctx.ID().getText());\n+ return new EFunctionRef(location(ctx), ctx.TYPE().getText(), ctx.ID().getText());\n }\n \n @Override\n public Object visitDeclvar(DeclvarContext ctx) {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \" Illegal tree structure.\");\n+ throw location(ctx).createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n @Override\n@@ -410,12 +405,12 @@ public Object visitTrap(TrapContext ctx) {\n String name = ctx.ID().getText();\n SBlock block = (SBlock)visit(ctx.block());\n \n- return new SCatch(line(ctx), offset(ctx), location(ctx), type, name, block);\n+ return new SCatch(location(ctx), type, name, block);\n }\n \n @Override\n public Object visitDelimiter(DelimiterContext ctx) {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \" Illegal tree structure.\");\n+ throw location(ctx).createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n private Object visitExpression(ExpressionContext ctx) {\n@@ -425,7 +420,7 @@ private Object visitExpression(ExpressionContext ctx) {\n @SuppressWarnings(\"unchecked\")\n List<ALink> links = (List<ALink>)expression;\n \n- return new EChain(line(ctx), offset(ctx), location(ctx), links, false, false, null, null);\n+ return new EChain(location(ctx), links, false, false, null, null);\n } else {\n return expression;\n }\n@@ -465,10 +460,10 @@ public Object visitBinary(BinaryContext ctx) {\n } else if (ctx.BWOR() != null) {\n operation = Operation.BWOR;\n } else {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \": Unexpected state.\");\n+ throw location(ctx).createError(new IllegalStateException(\"Unexpected state.\"));\n }\n \n- return new EBinary(line(ctx), offset(ctx), location(ctx), operation, left, right);\n+ return new EBinary(location(ctx), operation, left, right);\n }\n \n @Override\n@@ -494,10 +489,10 @@ public Object visitComp(CompContext ctx) {\n } else if (ctx.NER() != null) {\n operation = Operation.NER;\n } else {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \": Unexpected state.\");\n+ throw location(ctx).createError(new IllegalStateException(\"Unexpected state.\"));\n }\n \n- return new EComp(line(ctx), offset(ctx), location(ctx), operation, left, right);\n+ return new EComp(location(ctx), operation, left, right);\n }\n \n @Override\n@@ -511,10 +506,10 @@ public Object visitBool(BoolContext ctx) {\n } else if (ctx.BOOLOR() != null) {\n operation = Operation.OR;\n } else {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \": Unexpected state.\");\n+ throw location(ctx).createError(new IllegalStateException(\"Unexpected state.\"));\n }\n \n- return new EBool(line(ctx), offset(ctx), location(ctx), operation, left, 
right);\n+ return new EBool(location(ctx), operation, left, right);\n }\n \n @Override\n@@ -523,7 +518,7 @@ public Object visitConditional(ConditionalContext ctx) {\n AExpression left = (AExpression)visitExpression(ctx.expression(1));\n AExpression right = (AExpression)visitExpression(ctx.expression(2));\n \n- return new EConditional(line(ctx), offset(ctx), location(ctx), condition, left, right);\n+ return new EConditional(location(ctx), condition, left, right);\n }\n \n @Override\n@@ -557,12 +552,12 @@ public Object visitAssignment(AssignmentContext ctx) {\n } else if (ctx.AOR() != null) {\n operation = Operation.BWOR;\n } else {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \": Illegal tree structure.\");\n+ throw location(ctx).createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n AExpression expression = (AExpression)visitExpression(ctx.expression());\n \n- return new EChain(line(ctx), offset(ctx), location(ctx), links, false, false, operation, expression);\n+ return new EChain(location(ctx), links, false, false, operation, expression);\n }\n \n private Object visitUnary(UnaryContext ctx) {\n@@ -572,7 +567,7 @@ private Object visitUnary(UnaryContext ctx) {\n @SuppressWarnings(\"unchecked\")\n List<ALink> links = (List<ALink>)expression;\n \n- return new EChain(line(ctx), offset(ctx), location(ctx), links, false, false, null, null);\n+ return new EChain(location(ctx), links, false, false, null, null);\n } else {\n return expression;\n }\n@@ -589,10 +584,10 @@ public Object visitPre(PreContext ctx) {\n } else if (ctx.DECR() != null) {\n operation = Operation.DECR;\n } else {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \": Illegal tree structure.\");\n+ throw location(ctx).createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n- return new EChain(line(ctx), offset(ctx), location(ctx), links, true, false, operation, null);\n+ return new EChain(location(ctx), links, true, false, operation, null);\n }\n \n @Override\n@@ -606,10 +601,10 @@ public Object visitPost(PostContext ctx) {\n } else if (ctx.DECR() != null) {\n operation = Operation.DECR;\n } else {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \": Illegal tree structure.\");\n+ throw location(ctx).createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n- return new EChain(line(ctx), offset(ctx), location(ctx), links, false, true, operation, null);\n+ return new EChain(location(ctx), links, false, true, operation, null);\n }\n \n @Override\n@@ -622,31 +617,31 @@ public Object visitNumeric(NumericContext ctx) {\n final boolean negate = ctx.parent instanceof OperatorContext && ((OperatorContext)ctx.parent).SUB() != null;\n \n if (ctx.DECIMAL() != null) {\n- return new EDecimal(line(ctx), offset(ctx), location(ctx), (negate ? \"-\" : \"\") + ctx.DECIMAL().getText());\n+ return new EDecimal(location(ctx), (negate ? \"-\" : \"\") + ctx.DECIMAL().getText());\n } else if (ctx.HEX() != null) {\n- return new ENumeric(line(ctx), offset(ctx), location(ctx), (negate ? \"-\" : \"\") + ctx.HEX().getText().substring(2), 16);\n+ return new ENumeric(location(ctx), (negate ? \"-\" : \"\") + ctx.HEX().getText().substring(2), 16);\n } else if (ctx.INTEGER() != null) {\n- return new ENumeric(line(ctx), offset(ctx), location(ctx), (negate ? \"-\" : \"\") + ctx.INTEGER().getText(), 10);\n+ return new ENumeric(location(ctx), (negate ? 
\"-\" : \"\") + ctx.INTEGER().getText(), 10);\n } else if (ctx.OCTAL() != null) {\n- return new ENumeric(line(ctx), offset(ctx), location(ctx), (negate ? \"-\" : \"\") + ctx.OCTAL().getText().substring(1), 8);\n+ return new ENumeric(location(ctx), (negate ? \"-\" : \"\") + ctx.OCTAL().getText().substring(1), 8);\n } else {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \": Illegal tree structure.\");\n+ throw location(ctx).createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n @Override\n public Object visitTrue(TrueContext ctx) {\n- return new EBoolean(line(ctx), offset(ctx), location(ctx), true);\n+ return new EBoolean(location(ctx), true);\n }\n \n @Override\n public Object visitFalse(FalseContext ctx) {\n- return new EBoolean(line(ctx), offset(ctx), location(ctx), false);\n+ return new EBoolean(location(ctx), false);\n }\n \n @Override\n public Object visitNull(NullContext ctx) {\n- return new ENull(line(ctx), offset(ctx), location(ctx));\n+ return new ENull(location(ctx));\n }\n \n @Override\n@@ -666,10 +661,10 @@ public Object visitOperator(OperatorContext ctx) {\n } else if (ctx.SUB() != null) {\n operation = Operation.SUB;\n } else {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \" Illegal tree structure.\");\n+ throw location(ctx).createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n- return new EUnary(line(ctx), offset(ctx), location(ctx), operation, expression);\n+ return new EUnary(location(ctx), operation, expression);\n }\n }\n \n@@ -681,11 +676,11 @@ public Object visitCast(CastContext ctx) {\n if (child instanceof List) {\n @SuppressWarnings(\"unchecked\")\n List<ALink> links = (List<ALink>)child;\n- links.add(new LCast(line(ctx), offset(ctx), location(ctx), type));\n+ links.add(new LCast(location(ctx), type));\n \n return links;\n } else {\n- return new EExplicit(line(ctx), offset(ctx), location(ctx), type, (AExpression)child);\n+ return new EExplicit(location(ctx), type, (AExpression)child);\n }\n }\n \n@@ -703,7 +698,7 @@ public Object visitDynamic(DynamicContext ctx) {\n \n return links;\n } else if (!ctx.secondary().isEmpty()) {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \" Illegal tree structure.\");\n+ throw location(ctx).createError(new IllegalStateException(\"Illegal tree structure.\"));\n } else {\n return child;\n }\n@@ -714,7 +709,7 @@ public Object visitStatic(StaticContext ctx) {\n String type = ctx.decltype().getText();\n List<ALink> links = new ArrayList<>();\n \n- links.add(new LStatic(line(ctx), offset(ctx), location(ctx), type));\n+ links.add(new LStatic(location(ctx), type));\n links.add((ALink)visit(ctx.dot()));\n \n for (SecondaryContext secondary : ctx.secondary()) {\n@@ -734,7 +729,7 @@ public Object visitNewarray(NewarrayContext ctx) {\n }\n \n List<ALink> links = new ArrayList<>();\n- links.add(new LNewArray(line(ctx), offset(ctx), location(ctx), type, expressions));\n+ links.add(new LNewArray(location(ctx), type, expressions));\n \n if (ctx.dot() != null) {\n links.add((ALink)visit(ctx.dot()));\n@@ -743,7 +738,7 @@ public Object visitNewarray(NewarrayContext ctx) {\n links.add((ALink)visit(secondary));\n }\n } else if (!ctx.secondary().isEmpty()) {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \" Illegal tree structure.\");\n+ throw location(ctx).createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n return links;\n@@ -763,7 +758,7 @@ public Object visitChainprec(ChainprecContext ctx) {\n public Object 
visitString(StringContext ctx) {\n String string = ctx.STRING().getText().substring(1, ctx.STRING().getText().length() - 1);\n List<ALink> links = new ArrayList<>();\n- links.add(new LString(line(ctx), offset(ctx), location(ctx), string));\n+ links.add(new LString(location(ctx), string));\n \n return links;\n }\n@@ -772,7 +767,7 @@ public Object visitString(StringContext ctx) {\n public Object visitVariable(VariableContext ctx) {\n String name = ctx.ID().getText();\n List<ALink> links = new ArrayList<>();\n- links.add(new LVariable(line(ctx), offset(ctx), location(ctx), name));\n+ links.add(new LVariable(location(ctx), name));\n \n reserved.markReserved(name);\n \n@@ -786,7 +781,7 @@ public Object visitNewobject(NewobjectContext ctx) {\n List<AExpression> arguments = (List<AExpression>)visit(ctx.arguments());\n \n List<ALink> links = new ArrayList<>();\n- links.add(new LNewObj(line(ctx), offset(ctx), location(ctx), type, arguments));\n+ links.add(new LNewObj(location(ctx), type, arguments));\n \n return links;\n }\n@@ -798,7 +793,7 @@ public Object visitSecondary(SecondaryContext ctx) {\n } else if (ctx.brace() != null) {\n return visit(ctx.brace());\n } else {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \" Illegal tree structure.\");\n+ throw location(ctx).createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -808,7 +803,7 @@ public Object visitCallinvoke(CallinvokeContext ctx) {\n @SuppressWarnings(\"unchecked\")\n List<AExpression> arguments = (List<AExpression>)visit(ctx.arguments());\n \n- return new LCall(line(ctx), offset(ctx), location(ctx), name, arguments);\n+ return new LCall(location(ctx), name, arguments);\n }\n \n @Override\n@@ -820,17 +815,17 @@ public Object visitFieldaccess(FieldaccessContext ctx) {\n } else if (ctx.DOTINTEGER() != null) {\n value = ctx.DOTINTEGER().getText();\n } else {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \" Illegal tree structure.\");\n+ throw location(ctx).createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n- return new LField(line(ctx), offset(ctx), location(ctx), value);\n+ return new LField(location(ctx), value);\n }\n \n @Override\n public Object visitBraceaccess(BraceaccessContext ctx) {\n AExpression expression = (AExpression)visitExpression(ctx.expression());\n \n- return new LBrace(line(ctx), offset(ctx), location(ctx), expression);\n+ return new LBrace(location(ctx), expression);\n }\n \n @Override\n@@ -851,7 +846,7 @@ public Object visitArgument(ArgumentContext ctx) {\n } else if (ctx.funcref() != null) {\n return visit(ctx.funcref());\n } else {\n- throw new IllegalStateException(\"Error \" + location(ctx) + \" Illegal tree structure.\");\n+ throw location(ctx).createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n }", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java", "status": "modified" }, { "diff": "@@ -21,6 +21,7 @@\n \n import org.elasticsearch.painless.Definition.Cast;\n import org.elasticsearch.painless.Definition.Type;\n+import org.elasticsearch.painless.Location;\n import org.elasticsearch.painless.AnalyzerCaster;\n import org.elasticsearch.painless.Variables;\n import org.objectweb.asm.Label;\n@@ -98,8 +99,8 @@ public abstract class AExpression extends ANode {\n */\n protected Label fals = null;\n \n- public AExpression(int line, int offset, String location) {\n- super(line, offset, location);\n+ public AExpression(Location location) {\n+ super(location);\n }\n 
\n /**\n@@ -124,18 +125,18 @@ AExpression cast(Variables variables) {\n if (constant == null || this instanceof EConstant) {\n return this;\n } else {\n- final EConstant econstant = new EConstant(line, offset, location, constant);\n+ final EConstant econstant = new EConstant(location, constant);\n econstant.analyze(variables);\n \n if (!expected.equals(econstant.actual)) {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n return econstant;\n }\n } else {\n if (constant == null) {\n- final ECast ecast = new ECast(line, offset, location, this, cast);\n+ final ECast ecast = new ECast(location, this, cast);\n ecast.statement = statement;\n ecast.actual = expected;\n ecast.isNull = isNull;\n@@ -145,28 +146,28 @@ AExpression cast(Variables variables) {\n if (expected.sort.constant) {\n constant = AnalyzerCaster.constCast(location, constant, cast);\n \n- final EConstant econstant = new EConstant(line, offset, location, constant);\n+ final EConstant econstant = new EConstant(location, constant);\n econstant.analyze(variables);\n \n if (!expected.equals(econstant.actual)) {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n return econstant;\n } else if (this instanceof EConstant) {\n- final ECast ecast = new ECast(line, offset, location, this, cast);\n+ final ECast ecast = new ECast(location, this, cast);\n ecast.actual = expected;\n \n return ecast;\n } else {\n- final EConstant econstant = new EConstant(line, offset, location, constant);\n+ final EConstant econstant = new EConstant(location, constant);\n econstant.analyze(variables);\n \n if (!actual.equals(econstant.actual)) {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n- final ECast ecast = new ECast(line, offset, location, econstant, cast);\n+ final ECast ecast = new ECast(location, econstant, cast);\n ecast.actual = expected;\n \n return ecast;", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java", "status": "modified" }, { "diff": "@@ -20,6 +20,7 @@\n package org.elasticsearch.painless.node;\n \n import org.elasticsearch.painless.Definition.Type;\n+import org.elasticsearch.painless.Location;\n import org.elasticsearch.painless.Variables;\n import org.elasticsearch.painless.MethodWriter;\n \n@@ -73,8 +74,8 @@ public abstract class ALink extends ANode {\n */\n String string = null;\n \n- ALink(int line, int offset, String location, int size) {\n- super(line, offset, location);\n+ ALink(Location location, int size) {\n+ super(location);\n \n this.size = size;\n }", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ALink.java", "status": "modified" }, { "diff": "@@ -19,33 +19,22 @@\n \n package org.elasticsearch.painless.node;\n \n+import org.elasticsearch.painless.Location;\n+\n /**\n * The superclass for all other nodes.\n */\n public abstract class ANode {\n-\n- /**\n- * The line number in the original source used for debugging and errors.\n- */\n- final int line;\n-\n /**\n- * The character offset in the original source used for debugging and errors.\n+ * The identifier of the script and character offset used for debugging and errors.\n */\n- final int offset;\n+ final Location location;\n \n- /**\n- * The location in the original source 
to be printed in error messages.\n- */\n- final String location;\n-\n- ANode(int line, int offset, String location) {\n- this.line = line;\n- this.offset = offset;\n+ ANode(Location location) {\n this.location = location;\n }\n-\n- public String error(final String message) {\n- return \"Error \" + location + \": \" + message;\n+ \n+ public RuntimeException createError(RuntimeException exception) {\n+ return location.createError(exception);\n }\n }", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ANode.java", "status": "modified" }, { "diff": "@@ -21,6 +21,7 @@\n \n import org.elasticsearch.painless.Variables;\n import org.objectweb.asm.Label;\n+import org.elasticsearch.painless.Location;\n import org.elasticsearch.painless.MethodWriter;\n \n /**\n@@ -107,8 +108,8 @@ public abstract class AStatement extends ANode {\n */\n Label brake = null;\n \n- AStatement(int line, int offset, String location) {\n- super(line, offset, location);\n+ AStatement(Location location) {\n+ super(location);\n }\n \n /**", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStatement.java", "status": "modified" }, { "diff": "@@ -23,6 +23,7 @@\n import org.elasticsearch.painless.Definition;\n import org.elasticsearch.painless.Definition.Sort;\n import org.elasticsearch.painless.Definition.Type;\n+import org.elasticsearch.painless.Location;\n import org.elasticsearch.painless.MethodWriter;\n import org.elasticsearch.painless.Operation;\n import org.elasticsearch.painless.Variables;\n@@ -38,8 +39,8 @@ public final class EBinary extends AExpression {\n \n boolean cat = false;\n \n- public EBinary(int line, int offset, String location, Operation operation, AExpression left, AExpression right) {\n- super(line, offset, location);\n+ public EBinary(Location location, Operation operation, AExpression left, AExpression right) {\n+ super(location);\n \n this.operation = operation;\n this.left = left;\n@@ -71,7 +72,7 @@ void analyze(Variables variables) {\n } else if (operation == Operation.BWOR) {\n analyzeBWOr(variables);\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -82,7 +83,7 @@ private void analyzeMul(Variables variables) {\n Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, true);\n \n if (promote == null) {\n- throw new ClassCastException(error(\"Cannot apply multiply [*] to types \" +\n+ throw createError(new ClassCastException(\"Cannot apply multiply [*] to types \" +\n \"[\" + left.actual.name + \"] and [\" + right.actual.name + \"].\"));\n }\n \n@@ -104,7 +105,7 @@ private void analyzeMul(Variables variables) {\n } else if (sort == Sort.DOUBLE) {\n constant = (double)left.constant * (double)right.constant;\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -118,7 +119,7 @@ private void analyzeDiv(Variables variables) {\n Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, true);\n \n if (promote == null) {\n- throw new ClassCastException(error(\"Cannot apply divide [/] to types \" +\n+ throw createError(new ClassCastException(\"Cannot apply divide [/] to types \" +\n \"[\" + left.actual.name + \"] and [\" + right.actual.name + \"].\"));\n }\n \n@@ -131,16 +132,20 @@ private void analyzeDiv(Variables variables) {\n if (left.constant != null && right.constant 
!= null) {\n Sort sort = promote.sort;\n \n- if (sort == Sort.INT) {\n- constant = (int)left.constant / (int)right.constant;\n- } else if (sort == Sort.LONG) {\n- constant = (long)left.constant / (long)right.constant;\n- } else if (sort == Sort.FLOAT) {\n- constant = (float)left.constant / (float)right.constant;\n- } else if (sort == Sort.DOUBLE) {\n- constant = (double)left.constant / (double)right.constant;\n- } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ try {\n+ if (sort == Sort.INT) {\n+ constant = (int)left.constant / (int)right.constant;\n+ } else if (sort == Sort.LONG) {\n+ constant = (long)left.constant / (long)right.constant;\n+ } else if (sort == Sort.FLOAT) {\n+ constant = (float)left.constant / (float)right.constant;\n+ } else if (sort == Sort.DOUBLE) {\n+ constant = (double)left.constant / (double)right.constant;\n+ } else {\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n+ }\n+ } catch (ArithmeticException e) {\n+ throw createError(e);\n }\n }\n \n@@ -154,7 +159,7 @@ private void analyzeRem(Variables variables) {\n Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, true);\n \n if (promote == null) {\n- throw new ClassCastException(error(\"Cannot apply remainder [%] to types \" +\n+ throw createError(new ClassCastException(\"Cannot apply remainder [%] to types \" +\n \"[\" + left.actual.name + \"] and [\" + right.actual.name + \"].\"));\n }\n \n@@ -167,16 +172,20 @@ private void analyzeRem(Variables variables) {\n if (left.constant != null && right.constant != null) {\n Sort sort = promote.sort;\n \n- if (sort == Sort.INT) {\n- constant = (int)left.constant % (int)right.constant;\n- } else if (sort == Sort.LONG) {\n- constant = (long)left.constant % (long)right.constant;\n- } else if (sort == Sort.FLOAT) {\n- constant = (float)left.constant % (float)right.constant;\n- } else if (sort == Sort.DOUBLE) {\n- constant = (double)left.constant % (double)right.constant;\n- } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ try {\n+ if (sort == Sort.INT) {\n+ constant = (int)left.constant % (int)right.constant;\n+ } else if (sort == Sort.LONG) {\n+ constant = (long)left.constant % (long)right.constant;\n+ } else if (sort == Sort.FLOAT) {\n+ constant = (float)left.constant % (float)right.constant;\n+ } else if (sort == Sort.DOUBLE) {\n+ constant = (double)left.constant % (double)right.constant;\n+ } else {\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n+ }\n+ } catch (ArithmeticException e) {\n+ throw createError(e);\n }\n }\n \n@@ -190,7 +199,7 @@ private void analyzeAdd(Variables variables) {\n Type promote = AnalyzerCaster.promoteAdd(left.actual, right.actual);\n \n if (promote == null) {\n- throw new ClassCastException(error(\"Cannot apply add [+] to types \" +\n+ throw createError(new ClassCastException(\"Cannot apply add [+] to types \" +\n \"[\" + left.actual.name + \"] and [\" + right.actual.name + \"].\"));\n }\n \n@@ -228,7 +237,7 @@ private void analyzeAdd(Variables variables) {\n } else if (sort == Sort.STRING) {\n constant = \"\" + left.constant + right.constant;\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -242,7 +251,7 @@ private void analyzeSub(Variables variables) {\n Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, true);\n \n if (promote == null) {\n- throw new 
ClassCastException(error(\"Cannot apply subtract [-] to types \" +\n+ throw createError(new ClassCastException(\"Cannot apply subtract [-] to types \" +\n \"[\" + left.actual.name + \"] and [\" + right.actual.name + \"].\"));\n }\n \n@@ -264,7 +273,7 @@ private void analyzeSub(Variables variables) {\n } else if (sort == Sort.DOUBLE) {\n constant = (double)left.constant - (double)right.constant;\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -278,7 +287,7 @@ private void analyzeLSH(Variables variables) {\n Type promote = AnalyzerCaster.promoteNumeric(left.actual, false);\n \n if (promote == null) {\n- throw new ClassCastException(error(\"Cannot apply left shift [<<] to types \" +\n+ throw createError(new ClassCastException(\"Cannot apply left shift [<<] to types \" +\n \"[\" + left.actual.name + \"] and [\" + right.actual.name + \"].\"));\n }\n \n@@ -297,7 +306,7 @@ private void analyzeLSH(Variables variables) {\n } else if (sort == Sort.LONG) {\n constant = (long)left.constant << (int)right.constant;\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -311,7 +320,7 @@ private void analyzeRSH(Variables variables) {\n Type promote = AnalyzerCaster.promoteNumeric(left.actual, false);\n \n if (promote == null) {\n- throw new ClassCastException(error(\"Cannot apply right shift [>>] to types \" +\n+ throw createError(new ClassCastException(\"Cannot apply right shift [>>] to types \" +\n \"[\" + left.actual.name + \"] and [\" + right.actual.name + \"].\"));\n }\n \n@@ -330,7 +339,7 @@ private void analyzeRSH(Variables variables) {\n } else if (sort == Sort.LONG) {\n constant = (long)left.constant >> (int)right.constant;\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -344,7 +353,7 @@ private void analyzeUSH(Variables variables) {\n Type promote = AnalyzerCaster.promoteNumeric(left.actual, false);\n \n if (promote == null) {\n- throw new ClassCastException(error(\"Cannot apply unsigned shift [>>>] to types \" +\n+ throw createError(new ClassCastException(\"Cannot apply unsigned shift [>>>] to types \" +\n \"[\" + left.actual.name + \"] and [\" + right.actual.name + \"].\"));\n }\n \n@@ -363,7 +372,7 @@ private void analyzeUSH(Variables variables) {\n } else if (sort == Sort.LONG) {\n constant = (long)left.constant >>> (int)right.constant;\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -377,7 +386,7 @@ private void analyzeBWAnd(Variables variables) {\n Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, false);\n \n if (promote == null) {\n- throw new ClassCastException(error(\"Cannot apply and [&] to types \" +\n+ throw createError(new ClassCastException(\"Cannot apply and [&] to types \" +\n \"[\" + left.actual.name + \"] and [\" + right.actual.name + \"].\"));\n }\n \n@@ -395,7 +404,7 @@ private void analyzeBWAnd(Variables variables) {\n } else if (sort == Sort.LONG) {\n constant = (long)left.constant & (long)right.constant;\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree 
structure.\"));\n }\n }\n \n@@ -409,7 +418,7 @@ private void analyzeXor(Variables variables) {\n Type promote = AnalyzerCaster.promoteXor(left.actual, right.actual);\n \n if (promote == null) {\n- throw new ClassCastException(error(\"Cannot apply xor [^] to types \" +\n+ throw createError(new ClassCastException(\"Cannot apply xor [^] to types \" +\n \"[\" + left.actual.name + \"] and [\" + right.actual.name + \"].\"));\n }\n \n@@ -429,7 +438,7 @@ private void analyzeXor(Variables variables) {\n } else if (sort == Sort.LONG) {\n constant = (long)left.constant ^ (long)right.constant;\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -443,7 +452,7 @@ private void analyzeBWOr(Variables variables) {\n Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, false);\n \n if (promote == null) {\n- throw new ClassCastException(error(\"Cannot apply or [|] to types \" +\n+ throw createError(new ClassCastException(\"Cannot apply or [|] to types \" +\n \"[\" + left.actual.name + \"] and [\" + right.actual.name + \"].\"));\n }\n \n@@ -461,7 +470,7 @@ private void analyzeBWOr(Variables variables) {\n } else if (sort == Sort.LONG) {\n constant = (long)left.constant | (long)right.constant;\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -470,7 +479,7 @@ private void analyzeBWOr(Variables variables) {\n \n @Override\n void write(MethodWriter writer) {\n- writer.writeDebugInfo(offset);\n+ writer.writeDebugInfo(location);\n if (actual.sort == Sort.STRING && operation == Operation.ADD) {\n if (!cat) {\n writer.writeNewStrings();", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java", "status": "modified" }, { "diff": "@@ -20,6 +20,7 @@\n package org.elasticsearch.painless.node;\n \n import org.elasticsearch.painless.Definition;\n+import org.elasticsearch.painless.Location;\n import org.elasticsearch.painless.Operation;\n import org.elasticsearch.painless.Variables;\n import org.objectweb.asm.Label;\n@@ -34,8 +35,8 @@ public final class EBool extends AExpression {\n AExpression left;\n AExpression right;\n \n- public EBool(int line, int offset, String location, Operation operation, AExpression left, AExpression right) {\n- super(line, offset, location);\n+ public EBool(Location location, Operation operation, AExpression left, AExpression right) {\n+ super(location);\n \n this.operation = operation;\n this.left = left;\n@@ -58,7 +59,7 @@ void analyze(Variables variables) {\n } else if (operation == Operation.OR) {\n constant = (boolean)left.constant || (boolean)right.constant;\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -95,7 +96,7 @@ void write(MethodWriter writer) {\n writer.mark(localtru);\n }\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n } else {\n if (operation == Operation.AND) {\n@@ -131,7 +132,7 @@ void write(MethodWriter writer) {\n writer.push(false);\n writer.mark(end);\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n }", 
"filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBool.java", "status": "modified" }, { "diff": "@@ -20,6 +20,7 @@\n package org.elasticsearch.painless.node;\n \n import org.elasticsearch.painless.Definition;\n+import org.elasticsearch.painless.Location;\n import org.elasticsearch.painless.Variables;\n import org.elasticsearch.painless.MethodWriter;\n \n@@ -28,8 +29,8 @@\n */\n public final class EBoolean extends AExpression {\n \n- public EBoolean(int line, int offset, String location, boolean constant) {\n- super(line, offset, location);\n+ public EBoolean(Location location, boolean constant) {\n+ super(location);\n \n this.constant = constant;\n }\n@@ -41,6 +42,6 @@ void analyze(Variables variables) {\n \n @Override\n void write(MethodWriter adapter) {\n- throw new IllegalArgumentException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBoolean.java", "status": "modified" }, { "diff": "@@ -20,6 +20,7 @@\n package org.elasticsearch.painless.node;\n \n import org.elasticsearch.painless.Definition.Cast;\n+import org.elasticsearch.painless.Location;\n import org.elasticsearch.painless.Variables;\n import org.elasticsearch.painless.MethodWriter;\n \n@@ -34,8 +35,8 @@ final class ECast extends AExpression {\n \n Cast cast = null;\n \n- ECast(int line, int offset, String location, AExpression child, Cast cast) {\n- super(line, offset, location);\n+ ECast(Location location, AExpression child, Cast cast) {\n+ super(location);\n \n this.type = null;\n this.child = child;\n@@ -45,13 +46,13 @@ final class ECast extends AExpression {\n \n @Override\n void analyze(Variables variables) {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n @Override\n void write(MethodWriter writer) {\n child.write(writer);\n- writer.writeDebugInfo(offset);\n+ writer.writeDebugInfo(location);\n writer.writeCast(cast);\n writer.writeBranch(tru, fals);\n }", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECast.java", "status": "modified" }, { "diff": "@@ -23,6 +23,7 @@\n import org.elasticsearch.painless.Definition.Cast;\n import org.elasticsearch.painless.Definition.Sort;\n import org.elasticsearch.painless.Definition.Type;\n+import org.elasticsearch.painless.Location;\n import org.elasticsearch.painless.AnalyzerCaster;\n import org.elasticsearch.painless.Operation;\n import org.elasticsearch.painless.Variables;\n@@ -46,9 +47,9 @@ public final class EChain extends AExpression {\n Cast there = null;\n Cast back = null;\n \n- public EChain(int line, int offset, String location, List<ALink> links,\n+ public EChain(Location location, List<ALink> links,\n boolean pre, boolean post, Operation operation, AExpression expression) {\n- super(line, offset, location);\n+ super(location);\n \n this.links = links;\n this.pre = pre;\n@@ -114,40 +115,40 @@ private void analyzeIncrDecr() {\n ALink last = links.get(links.size() - 1);\n \n if (pre && post) {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n } else if (pre || post) {\n if (expression != null) {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n 
\n Sort sort = last.after.sort;\n \n if (operation == Operation.INCR) {\n if (sort == Sort.DOUBLE) {\n- expression = new EConstant(line, offset, location, 1D);\n+ expression = new EConstant(location, 1D);\n } else if (sort == Sort.FLOAT) {\n- expression = new EConstant(line, offset, location, 1F);\n+ expression = new EConstant(location, 1F);\n } else if (sort == Sort.LONG) {\n- expression = new EConstant(line, offset, location, 1L);\n+ expression = new EConstant(location, 1L);\n } else {\n- expression = new EConstant(line, offset, location, 1);\n+ expression = new EConstant(location, 1);\n }\n \n operation = Operation.ADD;\n } else if (operation == Operation.DECR) {\n if (sort == Sort.DOUBLE) {\n- expression = new EConstant(line, offset, location, 1D);\n+ expression = new EConstant(location, 1D);\n } else if (sort == Sort.FLOAT) {\n- expression = new EConstant(line, offset, location, 1F);\n+ expression = new EConstant(location, 1F);\n } else if (sort == Sort.LONG) {\n- expression = new EConstant(line, offset, location, 1L);\n+ expression = new EConstant(location, 1L);\n } else {\n- expression = new EConstant(line, offset, location, 1);\n+ expression = new EConstant(location, 1);\n }\n \n operation = Operation.SUB;\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n }\n@@ -180,12 +181,12 @@ private void analyzeCompound(Variables variables) {\n } else if (operation == Operation.BWOR) {\n promote = AnalyzerCaster.promoteXor(last.after, expression.actual);\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n if (promote == null) {\n- throw new ClassCastException(\"Cannot apply compound assignment \" +\n- \"[\" + operation.symbol + \"=] to types [\" + last.after + \"] and [\" + expression.actual + \"].\");\n+ throw createError(new ClassCastException(\"Cannot apply compound assignment \" +\n+ \"[\" + operation.symbol + \"=] to types [\" + last.after + \"] and [\" + expression.actual + \"].\"));\n }\n \n cat = operation == Operation.ADD && promote.sort == Sort.STRING;\n@@ -248,9 +249,8 @@ private void analyzeRead() {\n \n @Override\n void write(MethodWriter writer) {\n- if (cat) {\n- writer.writeDebugInfo(offset);\n- }\n+ // can cause class cast exception among other things at runtime\n+ writer.writeDebugInfo(location);\n \n if (cat) {\n writer.writeNewStrings();", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EChain.java", "status": "modified" }, { "diff": "@@ -22,6 +22,7 @@\n import org.elasticsearch.painless.Definition;\n import org.elasticsearch.painless.Definition.Sort;\n import org.elasticsearch.painless.Definition.Type;\n+import org.elasticsearch.painless.Location;\n import org.elasticsearch.painless.AnalyzerCaster;\n import org.elasticsearch.painless.Operation;\n import org.elasticsearch.painless.Variables;\n@@ -46,8 +47,8 @@ public final class EComp extends AExpression {\n AExpression left;\n AExpression right;\n \n- public EComp(int line, int offset, String location, Operation operation, AExpression left, AExpression right) {\n- super(line, offset, location);\n+ public EComp(Location location, Operation operation, AExpression left, AExpression right) {\n+ super(location);\n \n this.operation = operation;\n this.left = left;\n@@ -73,7 +74,7 @@ void analyze(Variables variables) {\n } else if (operation == Operation.LT) {\n 
analyzeLT(variables);\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -84,7 +85,7 @@ private void analyzeEq(Variables variables) {\n Type promote = AnalyzerCaster.promoteEquality(left.actual, right.actual);\n \n if (promote == null) {\n- throw new ClassCastException(error(\"Cannot apply equals [==] to types \" +\n+ throw createError(new ClassCastException(\"Cannot apply equals [==] to types \" +\n \"[\" + left.actual.name + \"] and [\" + right.actual.name + \"].\"));\n }\n \n@@ -95,7 +96,7 @@ private void analyzeEq(Variables variables) {\n right = right.cast(variables);\n \n if (left.isNull && right.isNull) {\n- throw new IllegalArgumentException(error(\"Extraneous comparison of null constants.\"));\n+ throw createError(new IllegalArgumentException(\"Extraneous comparison of null constants.\"));\n }\n \n if ((left.constant != null || left.isNull) && (right.constant != null || right.isNull)) {\n@@ -116,7 +117,7 @@ private void analyzeEq(Variables variables) {\n } else if (!right.isNull) {\n constant = right.constant.equals(null);\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -130,7 +131,7 @@ private void analyzeEqR(Variables variables) {\n Type promote = AnalyzerCaster.promoteEquality(left.actual, right.actual);\n \n if (promote == null) {\n- throw new ClassCastException(error(\"Cannot apply reference equals [===] to types \" +\n+ throw createError(new ClassCastException(\"Cannot apply reference equals [===] to types \" +\n \"[\" + left.actual.name + \"] and [\" + right.actual.name + \"].\"));\n }\n \n@@ -141,7 +142,7 @@ private void analyzeEqR(Variables variables) {\n right = right.cast(variables);\n \n if (left.isNull && right.isNull) {\n- throw new IllegalArgumentException(error(\"Extraneous comparison of null constants.\"));\n+ throw createError(new IllegalArgumentException(\"Extraneous comparison of null constants.\"));\n }\n \n if ((left.constant != null || left.isNull) && (right.constant != null || right.isNull)) {\n@@ -172,7 +173,7 @@ private void analyzeNE(Variables variables) {\n Type promote = AnalyzerCaster.promoteEquality(left.actual, right.actual);\n \n if (promote == null) {\n- throw new ClassCastException(error(\"Cannot apply not equals [!=] to types \" +\n+ throw createError(new ClassCastException(\"Cannot apply not equals [!=] to types \" +\n \"[\" + left.actual.name + \"] and [\" + right.actual.name + \"].\"));\n }\n \n@@ -183,7 +184,7 @@ private void analyzeNE(Variables variables) {\n right = right.cast(variables);\n \n if (left.isNull && right.isNull) {\n- throw new IllegalArgumentException(error(\"Extraneous comparison of null constants.\"));\n+ throw createError(new IllegalArgumentException(\"Extraneous comparison of null constants.\"));\n }\n \n if ((left.constant != null || left.isNull) && (right.constant != null || right.isNull)) {\n@@ -204,7 +205,7 @@ private void analyzeNE(Variables variables) {\n } else if (!right.isNull) {\n constant = !right.constant.equals(null);\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -218,7 +219,7 @@ private void analyzeNER(Variables variables) {\n Type promote = AnalyzerCaster.promoteEquality(left.actual, right.actual);\n \n if (promote == null) {\n- 
throw new ClassCastException(error(\"Cannot apply reference not equals [!==] to types \" +\n+ throw createError(new ClassCastException(\"Cannot apply reference not equals [!==] to types \" +\n \"[\" + left.actual.name + \"] and [\" + right.actual.name + \"].\"));\n }\n \n@@ -229,7 +230,7 @@ private void analyzeNER(Variables variables) {\n right = right.cast(variables);\n \n if (left.isNull && right.isNull) {\n- throw new IllegalArgumentException(error(\"Extraneous comparison of null constants.\"));\n+ throw createError(new IllegalArgumentException(\"Extraneous comparison of null constants.\"));\n }\n \n if ((left.constant != null || left.isNull) && (right.constant != null || right.isNull)) {\n@@ -260,7 +261,7 @@ private void analyzeGTE(Variables variables) {\n Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, true);\n \n if (promote == null) {\n- throw new ClassCastException(error(\"Cannot apply greater than or equals [>=] to types \" +\n+ throw createError(new ClassCastException(\"Cannot apply greater than or equals [>=] to types \" +\n \"[\" + left.actual.name + \"] and [\" + right.actual.name + \"].\"));\n }\n \n@@ -282,7 +283,7 @@ private void analyzeGTE(Variables variables) {\n } else if (sort == Sort.DOUBLE) {\n constant = (double)left.constant >= (double)right.constant;\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -296,7 +297,7 @@ private void analyzeGT(Variables variables) {\n Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, true);\n \n if (promote == null) {\n- throw new ClassCastException(error(\"Cannot apply greater than [>] to types \" +\n+ throw createError(new ClassCastException(\"Cannot apply greater than [>] to types \" +\n \"[\" + left.actual.name + \"] and [\" + right.actual.name + \"].\"));\n }\n \n@@ -318,7 +319,7 @@ private void analyzeGT(Variables variables) {\n } else if (sort == Sort.DOUBLE) {\n constant = (double)left.constant > (double)right.constant;\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -332,7 +333,7 @@ private void analyzeLTE(Variables variables) {\n Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, true);\n \n if (promote == null) {\n- throw new ClassCastException(error(\"Cannot apply less than or equals [<=] to types \" +\n+ throw createError(new ClassCastException(\"Cannot apply less than or equals [<=] to types \" +\n \"[\" + left.actual.name + \"] and [\" + right.actual.name + \"].\"));\n }\n \n@@ -354,7 +355,7 @@ private void analyzeLTE(Variables variables) {\n } else if (sort == Sort.DOUBLE) {\n constant = (double)left.constant <= (double)right.constant;\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -368,7 +369,7 @@ private void analyzeLT(Variables variables) {\n Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, true);\n \n if (promote == null) {\n- throw new ClassCastException(error(\"Cannot apply less than [>=] to types \" +\n+ throw createError(new ClassCastException(\"Cannot apply less than [>=] to types \" +\n \"[\" + left.actual.name + \"] and [\" + right.actual.name + \"].\"));\n }\n \n@@ -390,7 +391,7 @@ private void analyzeLT(Variables variables) {\n } else if (sort 
== Sort.DOUBLE) {\n constant = (double)left.constant < (double)right.constant;\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -399,7 +400,7 @@ private void analyzeLT(Variables variables) {\n \n @Override\n void write(MethodWriter writer) {\n- writer.writeDebugInfo(offset);\n+ writer.writeDebugInfo(location);\n boolean branch = tru != null || fals != null;\n org.objectweb.asm.Type rtype = right.actual.type;\n Sort rsort = right.actual.sort;\n@@ -429,12 +430,12 @@ void write(MethodWriter writer) {\n case BYTE:\n case SHORT:\n case CHAR:\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n case BOOL:\n if (eq) writer.ifZCmp(MethodWriter.EQ, jump);\n else if (ne) writer.ifZCmp(MethodWriter.NE, jump);\n else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n break;\n@@ -449,7 +450,7 @@ void write(MethodWriter writer) {\n else if (gt) writer.ifCmp(rtype, MethodWriter.GT, jump);\n else if (gte) writer.ifCmp(rtype, MethodWriter.GE, jump);\n else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n break;\n@@ -485,7 +486,7 @@ void write(MethodWriter writer) {\n writer.invokeStatic(DEF_UTIL_TYPE, DEF_GTE_CALL);\n writejump = false;\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n if (branch && !writejump) {\n@@ -518,7 +519,7 @@ void write(MethodWriter writer) {\n writer.ifCmp(rtype, MethodWriter.NE, jump);\n }\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n ", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java", "status": "modified" }, { "diff": "@@ -21,6 +21,7 @@\n \n import org.elasticsearch.painless.Definition;\n import org.elasticsearch.painless.Definition.Type;\n+import org.elasticsearch.painless.Location;\n import org.elasticsearch.painless.AnalyzerCaster;\n import org.elasticsearch.painless.Variables;\n import org.objectweb.asm.Label;\n@@ -35,8 +36,8 @@ public final class EConditional extends AExpression {\n AExpression left;\n AExpression right;\n \n- public EConditional(int line, int offset, String location, AExpression condition, AExpression left, AExpression right) {\n- super(line, offset, location);\n+ public EConditional(Location location, AExpression condition, AExpression left, AExpression right) {\n+ super(location);\n \n this.condition = condition;\n this.left = left;\n@@ -50,7 +51,7 @@ void analyze(Variables variables) {\n condition = condition.cast(variables);\n \n if (condition.constant != null) {\n- throw new IllegalArgumentException(error(\"Extraneous conditional statement.\"));\n+ throw createError(new IllegalArgumentException(\"Extraneous conditional statement.\"));\n }\n \n left.expected = expected;\n@@ -78,7 +79,7 @@ void analyze(Variables variables) {\n \n @Override\n void write(MethodWriter writer) {\n- writer.writeDebugInfo(offset);\n+ writer.writeDebugInfo(location);\n Label localfals = new Label();\n Label end = new Label();\n ", "filename": 
"modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java", "status": "modified" }, { "diff": "@@ -21,17 +21,18 @@\n \n import org.elasticsearch.painless.Definition;\n import org.elasticsearch.painless.Definition.Sort;\n+import org.elasticsearch.painless.Location;\n import org.elasticsearch.painless.Variables;\n import org.elasticsearch.painless.MethodWriter;\n \n /**\n- * Respresents a constant. Note this replaces any other expression\n+ * Represents a constant. Note this replaces any other expression\n * node with a constant value set during a cast. (Internal only.)\n */\n final class EConstant extends AExpression {\n \n- EConstant(int line, int offset, String location, Object constant) {\n- super(line, offset, location);\n+ EConstant(Location location, Object constant) {\n+ super(location);\n \n this.constant = constant;\n }\n@@ -57,7 +58,7 @@ void analyze(Variables variables) {\n } else if (constant instanceof Boolean) {\n actual = Definition.BOOLEAN_TYPE;\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -85,7 +86,7 @@ void write(MethodWriter writer) {\n \n break;\n default:\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n if (sort != Sort.BOOL) {", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConstant.java", "status": "modified" }, { "diff": "@@ -20,6 +20,7 @@\n package org.elasticsearch.painless.node;\n \n import org.elasticsearch.painless.Definition;\n+import org.elasticsearch.painless.Location;\n import org.elasticsearch.painless.Variables;\n import org.elasticsearch.painless.MethodWriter;\n \n@@ -30,8 +31,8 @@ public final class EDecimal extends AExpression {\n \n final String value;\n \n- public EDecimal(int line, int offset, String location, String value) {\n- super(line, offset, location);\n+ public EDecimal(Location location, String value) {\n+ super(location);\n \n this.value = value;\n }\n@@ -43,20 +44,20 @@ void analyze(Variables variables) {\n constant = Float.parseFloat(value.substring(0, value.length() - 1));\n actual = Definition.FLOAT_TYPE;\n } catch (NumberFormatException exception) {\n- throw new IllegalArgumentException(error(\"Invalid float constant [\" + value + \"].\"));\n+ throw createError(new IllegalArgumentException(\"Invalid float constant [\" + value + \"].\"));\n }\n } else {\n try {\n constant = Double.parseDouble(value);\n actual = Definition.DOUBLE_TYPE;\n } catch (NumberFormatException exception) {\n- throw new IllegalArgumentException(error(\"Invalid double constant [\" + value + \"].\"));\n+ throw createError(new IllegalArgumentException(\"Invalid double constant [\" + value + \"].\"));\n }\n }\n }\n \n @Override\n void write(MethodWriter writer) {\n- throw new IllegalArgumentException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EDecimal.java", "status": "modified" }, { "diff": "@@ -20,6 +20,7 @@\n package org.elasticsearch.painless.node;\n \n import org.elasticsearch.painless.Definition;\n+import org.elasticsearch.painless.Location;\n import org.elasticsearch.painless.Variables;\n import org.elasticsearch.painless.MethodWriter;\n \n@@ -31,8 +32,8 @@ public final class EExplicit extends 
AExpression {\n final String type;\n AExpression child;\n \n- public EExplicit(int line, int offset, String location, String type, AExpression child) {\n- super(line, offset, location);\n+ public EExplicit(Location location, String type, AExpression child) {\n+ super(location);\n \n this.type = type;\n this.child = child;\n@@ -43,7 +44,7 @@ void analyze(Variables variables) {\n try {\n actual = Definition.getType(this.type);\n } catch (IllegalArgumentException exception) {\n- throw new IllegalArgumentException(error(\"Not a type [\" + this.type + \"].\"));\n+ throw createError(new IllegalArgumentException(\"Not a type [\" + this.type + \"].\"));\n }\n \n child.expected = actual;\n@@ -54,7 +55,7 @@ void analyze(Variables variables) {\n \n @Override\n void write(MethodWriter writer) {\n- throw new IllegalArgumentException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n AExpression cast(Variables variables) {", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EExplicit.java", "status": "modified" }, { "diff": "@@ -19,6 +19,7 @@\n \n package org.elasticsearch.painless.node;\n \n+import org.elasticsearch.painless.Location;\n import org.elasticsearch.painless.MethodWriter;\n import org.elasticsearch.painless.Variables;\n \n@@ -29,20 +30,21 @@ public class EFunctionRef extends AExpression {\n public String type;\n public String call;\n \n- public EFunctionRef(int line, int offset, String location, String type, String call) {\n- super(line, offset, location);\n+ public EFunctionRef(Location location, String type, String call) {\n+ super(location);\n \n this.type = type;\n this.call = call;\n }\n \n @Override\n void analyze(Variables variables) {\n- throw new UnsupportedOperationException(error(\"Function references [\" + type + \"::\" + call + \"] are not currently supported.\"));\n+ throw createError(new UnsupportedOperationException(\"Function references [\" + type + \"::\" + call + \n+ \"] are not currently supported.\"));\n }\n \n @Override\n void write(MethodWriter writer) {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java", "status": "modified" }, { "diff": "@@ -20,6 +20,7 @@\n package org.elasticsearch.painless.node;\n \n import org.elasticsearch.painless.Definition;\n+import org.elasticsearch.painless.Location;\n import org.elasticsearch.painless.Variables;\n import org.objectweb.asm.Opcodes;\n import org.elasticsearch.painless.MethodWriter;\n@@ -29,8 +30,8 @@\n */\n public final class ENull extends AExpression {\n \n- public ENull(int line, int offset, String location) {\n- super(line, offset, location);\n+ public ENull(Location location) {\n+ super(location);\n }\n \n @Override\n@@ -39,7 +40,7 @@ void analyze(Variables variables) {\n \n if (expected != null) {\n if (expected.sort.primitive) {\n- throw new IllegalArgumentException(error(\"Cannot cast null to a primitive type [\" + expected.name + \"].\"));\n+ throw createError(new IllegalArgumentException(\"Cannot cast null to a primitive type [\" + expected.name + \"].\"));\n }\n \n actual = expected;", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENull.java", "status": "modified" }, { "diff": "@@ -20,6 +20,7 @@\n package org.elasticsearch.painless.node;\n \n import 
org.elasticsearch.painless.Definition;\n+import org.elasticsearch.painless.Location;\n import org.elasticsearch.painless.Definition.Sort;\n import org.elasticsearch.painless.Variables;\n import org.elasticsearch.painless.MethodWriter;\n@@ -32,8 +33,8 @@ public final class ENumeric extends AExpression {\n final String value;\n int radix;\n \n- public ENumeric(int line, int offset, String location, String value, int radix) {\n- super(line, offset, location);\n+ public ENumeric(Location location, String value, int radix) {\n+ super(location);\n \n this.value = value;\n this.radix = radix;\n@@ -43,32 +44,32 @@ public ENumeric(int line, int offset, String location, String value, int radix)\n void analyze(Variables variables) {\n if (value.endsWith(\"d\") || value.endsWith(\"D\")) {\n if (radix != 10) {\n- throw new IllegalStateException(error(\"Invalid tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n try {\n constant = Double.parseDouble(value.substring(0, value.length() - 1));\n actual = Definition.DOUBLE_TYPE;\n } catch (NumberFormatException exception) {\n- throw new IllegalArgumentException(error(\"Invalid double constant [\" + value + \"].\"));\n+ throw createError(new IllegalArgumentException(\"Invalid double constant [\" + value + \"].\"));\n }\n } else if (value.endsWith(\"f\") || value.endsWith(\"F\")) {\n if (radix != 10) {\n- throw new IllegalStateException(error(\"Invalid tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n try {\n constant = Float.parseFloat(value.substring(0, value.length() - 1));\n actual = Definition.FLOAT_TYPE;\n } catch (NumberFormatException exception) {\n- throw new IllegalArgumentException(error(\"Invalid float constant [\" + value + \"].\"));\n+ throw createError(new IllegalArgumentException(\"Invalid float constant [\" + value + \"].\"));\n }\n } else if (value.endsWith(\"l\") || value.endsWith(\"L\")) {\n try {\n constant = Long.parseLong(value.substring(0, value.length() - 1), radix);\n actual = Definition.LONG_TYPE;\n } catch (NumberFormatException exception) {\n- throw new IllegalArgumentException(error(\"Invalid long constant [\" + value + \"].\"));\n+ throw createError(new IllegalArgumentException(\"Invalid long constant [\" + value + \"].\"));\n }\n } else {\n try {\n@@ -89,13 +90,13 @@ void analyze(Variables variables) {\n actual = Definition.INT_TYPE;\n }\n } catch (NumberFormatException exception) {\n- throw new IllegalArgumentException(error(\"Invalid int constant [\" + value + \"].\"));\n+ throw createError(new IllegalArgumentException(\"Invalid int constant [\" + value + \"].\"));\n }\n }\n }\n \n @Override\n void write(MethodWriter writer) {\n- throw new IllegalArgumentException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENumeric.java", "status": "modified" }, { "diff": "@@ -20,6 +20,7 @@\n package org.elasticsearch.painless.node;\n \n import org.elasticsearch.painless.Definition;\n+import org.elasticsearch.painless.Location;\n import org.elasticsearch.painless.Definition.Sort;\n import org.elasticsearch.painless.Definition.Type;\n import org.elasticsearch.painless.AnalyzerCaster;\n@@ -40,8 +41,8 @@ public final class EUnary extends AExpression {\n final Operation operation;\n AExpression child;\n \n- public EUnary(int line, int offset, String location, Operation 
operation, AExpression child) {\n- super(line, offset, location);\n+ public EUnary(Location location, Operation operation, AExpression child) {\n+ super(location);\n \n this.operation = operation;\n this.child = child;\n@@ -58,7 +59,7 @@ void analyze(Variables variables) {\n } else if (operation == Operation.SUB) {\n analyzerSub(variables);\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -80,7 +81,7 @@ void analyzeBWNot(Variables variables) {\n Type promote = AnalyzerCaster.promoteNumeric(child.actual, false);\n \n if (promote == null) {\n- throw new ClassCastException(error(\"Cannot apply not [~] to type [\" + child.actual.name + \"].\"));\n+ throw createError(new ClassCastException(\"Cannot apply not [~] to type [\" + child.actual.name + \"].\"));\n }\n \n child.expected = promote;\n@@ -94,7 +95,7 @@ void analyzeBWNot(Variables variables) {\n } else if (sort == Sort.LONG) {\n constant = ~(long)child.constant;\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -107,7 +108,7 @@ void analyzerAdd(Variables variables) {\n Type promote = AnalyzerCaster.promoteNumeric(child.actual, true);\n \n if (promote == null) {\n- throw new ClassCastException(error(\"Cannot apply positive [+] to type [\" + child.actual.name + \"].\"));\n+ throw createError(new ClassCastException(\"Cannot apply positive [+] to type [\" + child.actual.name + \"].\"));\n }\n \n child.expected = promote;\n@@ -125,7 +126,7 @@ void analyzerAdd(Variables variables) {\n } else if (sort == Sort.DOUBLE) {\n constant = +(double)child.constant;\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -138,7 +139,7 @@ void analyzerSub(Variables variables) {\n Type promote = AnalyzerCaster.promoteNumeric(child.actual, true);\n \n if (promote == null) {\n- throw new ClassCastException(error(\"Cannot apply negative [-] to type [\" + child.actual.name + \"].\"));\n+ throw createError(new ClassCastException(\"Cannot apply negative [-] to type [\" + child.actual.name + \"].\"));\n }\n \n child.expected = promote;\n@@ -156,7 +157,7 @@ void analyzerSub(Variables variables) {\n } else if (sort == Sort.DOUBLE) {\n constant = -(double)child.constant;\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n }\n \n@@ -165,7 +166,7 @@ void analyzerSub(Variables variables) {\n \n @Override\n void write(MethodWriter writer) {\n- writer.writeDebugInfo(offset);\n+ writer.writeDebugInfo(location);\n if (operation == Operation.NOT) {\n if (tru == null && fals == null) {\n Label localfals = new Label();\n@@ -199,7 +200,7 @@ void write(MethodWriter writer) {\n } else if (sort == Sort.LONG) {\n writer.push(-1L);\n } else {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree structure.\"));\n }\n \n writer.math(MethodWriter.XOR, type);\n@@ -211,7 +212,7 @@ void write(MethodWriter writer) {\n writer.math(MethodWriter.NEG, type);\n }\n } else if (operation != Operation.ADD) {\n- throw new IllegalStateException(error(\"Illegal tree structure.\"));\n+ throw createError(new IllegalStateException(\"Illegal tree 
structure.\"));\n }\n \n writer.writeBranch(tru, fals);", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java", "status": "modified" } ] }
{ "body": "Relocation of non-primary shards is realized by recovering from the primary shard. `RoutingNodes` wrongly equates non-primary relocation as recovering a shard from the non-primary relocation source, however. This invalidates the checks in the `ThrottlingAllocationDecider` where the number of concurrent incoming/outgoing recoveries are bound using the settings `cluster.routing.allocation.node_concurrent_recoveries`, `cluster.routing.allocation.node_concurrent_incoming_recoveries` and `cluster.routing.allocation.node_concurrent_outgoing_recoveries`.\n", "comments": [ { "body": "+1 good find\n", "created_at": "2016-05-30T14:53:23Z" }, { "body": "@ywelsch What version(s) of Elasticsearch does this bug affect?\n", "created_at": "2016-06-01T19:55:12Z" }, { "body": "@jonaf This only affects v5, previous versions used other mechanisms to throttle recoveries (see https://www.elastic.co/guide/en/elasticsearch/reference/2.3/recovery.html and in particular `indices.recovery.concurrent_streams`)\n", "created_at": "2016-06-02T14:24:22Z" } ], "number": 18640, "title": "Recovery throttling does not properly account for relocating non-primary shards" }
{ "body": "Relocation of non-primary shards is realized by recovering from the primary shard. Recovery throttling wrongly equates non-primary relocation as recovering a shard from the non-primary relocation source, however.\n\nCloses #18640\n", "number": 18701, "review_comments": [ { "body": "can it be unassigned too? I am still trying to get back into how this works but I wonder why :)\n", "created_at": "2016-06-02T14:06:36Z" }, { "body": "here, we're iterating over the shards assigned to a node (further up outside Github view: `if (shard.assignedToNode()) {`).\n", "created_at": "2016-06-02T14:09:56Z" }, { "body": "this is nice!\n", "created_at": "2016-06-02T14:20:21Z" }, { "body": "why is this public now can it be private still?\n", "created_at": "2016-06-02T14:22:08Z" }, { "body": "so this means that we either have a replica peer recovery (primary == false) or we have a primary relocation that means it's not assigned? I wonder if we can make this more clear? maybe assign local vars?\n", "created_at": "2016-06-02T14:25:19Z" }, { "body": "right, I used it in ThrottlingAllocationDecider, then saw that we have the nicer method \"activePrimary\" here.\n", "created_at": "2016-06-02T14:26:38Z" }, { "body": "so this is commented out, I wonder if this is really simple to do? The primaries can be allocaed for a while and be big and then we have space for the replicas and the recovery is big?\n", "created_at": "2016-06-02T14:29:44Z" }, { "body": "hmm we don't want this shortcut anymore? I mean it could speed up thinks no?\n", "created_at": "2016-06-02T14:31:18Z" }, { "body": "Yeah, I wasn't too happy on removing it. Unfortunately, it wasn't safe and there is no safe version which makes sense. Even if `incomingRecoveries` is over the limit, we might still allocate primary unassigned shards (which are only bound by `primariesInitialRecoveries`). Checking for `outgoingRecoveries` on this node to allocate a shard makes no sense at all, as allocating a shard to this node only affects the outgoing recoveries of the node that has the primary shard.\n", "created_at": "2016-06-02T14:55:32Z" }, { "body": "Unfortunately we cannot use `shardrouting.isPeerRecovery()` here because the `shardRouting` that is passed to the method is not the initializing shard that will be assigned to this node. I will add some more comments.\n\nI can also invert the flow of this `if/else` condition so that we have `shardRouting.primary() && shardRouting.unassigned()` as boolean condition which corresponds to what we currently have in the `else` branch.\n", "created_at": "2016-06-02T15:02:47Z" }, { "body": "I'm not sure I follow here. In case of index creation, we don't do any kind of \"real recovery\" for the primary shard. What we do is delete existing files and create a new index and translog, which probably amounts to tiny work. What the `ThrottlingAllocationDecider` tries to prevent (imho) is that we do too much IO on a node by recovering too many shards in parallel.\n", "created_at": "2016-06-02T15:32:12Z" }, { "body": "I think it would be enough to use descriptive local vars?\n", "created_at": "2016-06-02T15:44:43Z" }, { "body": "yes that is true for primaries not necessarily for replicas. 
The replicas might not be a light recovery\n", "created_at": "2016-06-02T15:45:27Z" }, { "body": "ok fair enough\n", "created_at": "2016-06-02T15:45:45Z" }, { "body": "For replicas, we do not reach this point in the code (they are covered above by peer recovery and are checked against currentInRecoveries and primaryNodeOutRecoveries).\n\nAre you in favour of activating this code? I will then also have to modify the calculation of `incomingRecoveries` in `RoutingNodes` to make sure that these are not counted as recoveries.\n", "created_at": "2016-06-02T15:53:47Z" }, { "body": "no I think not here but I wonder why it's commeted out? can we remove it?\n", "created_at": "2016-06-03T07:40:49Z" }, { "body": "can we add a check that the primary is active?\n", "created_at": "2016-06-03T10:59:26Z" }, { "body": "I had that one originally there. But this fails some tests currently as we don't always add the ReplicaAfterPrimaryActiveAllocationDecider to the list of deciders in these tests. As this impacts more than a few places, I decided against tackling that one in this PR.\n", "created_at": "2016-06-03T11:05:36Z" }, { "body": "hm... this effectively makes ReplicaAfterPrimaryActiveAllocationDecider irelevant , right? \n", "created_at": "2016-06-03T11:24:00Z" }, { "body": "question - do routing nodes always reflect the current state of affairs? if the balancer makes multiple decision about multiple shards , are these all reflected in routing nodes?\n", "created_at": "2016-06-03T11:25:09Z" }, { "body": "Ok fair enough. can you leave a comment?\n", "created_at": "2016-06-03T11:32:33Z" }, { "body": "nit: can we move the look up of the primary to the callers that pass null? this method is hairy enough :)\n", "created_at": "2016-06-03T11:35:06Z" }, { "body": "This duplicates indeed the `ReplicaAfterPrimaryActiveAllocationDecider` but checks the property only after we have checked the number of incoming recoveries. I can collapse both so that the check happens before checking incoming recoveries. WDYT?\n", "created_at": "2016-06-03T11:39:31Z" }, { "body": "nit: I initially go confused by the method name `addRecovery` as it used to update the recovery counters for both addition and removal. maybe call it updateRecoveryCounts ?\n", "created_at": "2016-06-03T11:41:45Z" }, { "body": "yes, routing nodes always reflect the current state to which the balancer has committed. The balancer first commits to a decision before trying to allocate another shard.\n", "created_at": "2016-06-03T11:43:01Z" }, { "body": "Yeah, I think we can collapse both deciders into one here - it will make things simpler. Call it RecoveriesAllocationDecider that is incharge of all recovering shards (replicas and relocating primaries). It's good to do it in a different PR imo..\n", "created_at": "2016-06-03T11:43:27Z" } ], "title": "Fix recovery throttling to properly handle relocating non-primary shards" }
{ "commits": [ { "message": "Fix recovery throttling to properly handle relocating non-primary shards" } ], "files": [ { "diff": "@@ -108,18 +108,18 @@ public RoutingNodes(ClusterState clusterState, boolean readOnly) {\n // add the counterpart shard with relocatingNodeId reflecting the source from which\n // it's relocating from.\n ShardRouting targetShardRouting = shard.buildTargetRelocatingShard();\n- addInitialRecovery(targetShardRouting, routingTable);\n+ addInitialRecovery(targetShardRouting, indexShard.primary);\n previousValue = entries.put(targetShardRouting.shardId(), targetShardRouting);\n if (previousValue != null) {\n throw new IllegalArgumentException(\"Cannot have two different shards with same shard id on same node\");\n }\n assignedShardsAdd(targetShardRouting);\n- } else if (shard.active() == false) { // shards that are initializing without being relocated\n+ } else if (shard.initializing()) {\n if (shard.primary()) {\n inactivePrimaryCount++;\n }\n inactiveShardCount++;\n- addInitialRecovery(shard, routingTable);\n+ addInitialRecovery(shard, indexShard.primary);\n }\n } else {\n unassignedShards.add(shard);\n@@ -134,48 +134,44 @@ public RoutingNodes(ClusterState clusterState, boolean readOnly) {\n }\n \n private void addRecovery(ShardRouting routing) {\n- addRecovery(routing, true, null);\n+ updateRecoveryCounts(routing, true, findAssignedPrimaryIfPeerRecovery(routing));\n }\n \n private void removeRecovery(ShardRouting routing) {\n- addRecovery(routing, false, null);\n+ updateRecoveryCounts(routing, false, findAssignedPrimaryIfPeerRecovery(routing));\n }\n \n- private void addInitialRecovery(ShardRouting routing, RoutingTable routingTable) {\n- addRecovery(routing, true, routingTable);\n+ private void addInitialRecovery(ShardRouting routing, ShardRouting initialPrimaryShard) {\n+ updateRecoveryCounts(routing, true, initialPrimaryShard);\n }\n \n- private void addRecovery(final ShardRouting routing, final boolean increment, final RoutingTable routingTable) {\n+ private void updateRecoveryCounts(final ShardRouting routing, final boolean increment, @Nullable final ShardRouting primary) {\n final int howMany = increment ? 
1 : -1;\n assert routing.initializing() : \"routing must be initializing: \" + routing;\n+ // TODO: check primary == null || primary.active() after all tests properly add ReplicaAfterPrimaryActiveAllocationDecider\n+ assert primary == null || primary.assignedToNode() :\n+ \"shard is initializing but its primary is not assigned to a node\";\n+\n Recoveries.getOrAdd(recoveriesPerNode, routing.currentNodeId()).addIncoming(howMany);\n- final String sourceNodeId;\n- if (routing.relocatingNodeId() != null) { // this is a relocation-target\n- sourceNodeId = routing.relocatingNodeId();\n- if (routing.primary() && increment == false) { // primary is done relocating\n+\n+ if (routing.isPeerRecovery()) {\n+ // add/remove corresponding outgoing recovery on node with primary shard\n+ if (primary == null) {\n+ throw new IllegalStateException(\"shard is peer recovering but primary is unassigned\");\n+ }\n+ Recoveries.getOrAdd(recoveriesPerNode, primary.currentNodeId()).addOutgoing(howMany);\n+\n+ if (increment == false && routing.primary() && routing.relocatingNodeId() != null) {\n+ // primary is done relocating, move non-primary recoveries from old primary to new primary\n int numRecoveringReplicas = 0;\n for (ShardRouting assigned : assignedShards(routing.shardId())) {\n- if (assigned.primary() == false && assigned.initializing() && assigned.relocatingNodeId() == null) {\n+ if (assigned.primary() == false && assigned.isPeerRecovery()) {\n numRecoveringReplicas++;\n }\n }\n- // we transfer the recoveries to the relocated primary\n- recoveriesPerNode.get(sourceNodeId).addOutgoing(-numRecoveringReplicas);\n+ recoveriesPerNode.get(routing.relocatingNodeId()).addOutgoing(-numRecoveringReplicas);\n recoveriesPerNode.get(routing.currentNodeId()).addOutgoing(numRecoveringReplicas);\n }\n- } else if (routing.primary() == false) { // primary without relocationID is initial recovery\n- ShardRouting primary = findPrimary(routing);\n- if (primary == null && routingTable != null) {\n- primary = routingTable.index(routing.index().getName()).shard(routing.shardId().id()).primary;\n- } else if (primary == null) {\n- throw new IllegalStateException(\"replica is initializing but primary is unassigned\");\n- }\n- sourceNodeId = primary.currentNodeId();\n- } else {\n- sourceNodeId = null;\n- }\n- if (sourceNodeId != null) {\n- Recoveries.getOrAdd(recoveriesPerNode, sourceNodeId).addOutgoing(howMany);\n }\n }\n \n@@ -187,18 +183,21 @@ public int getOutgoingRecoveries(String nodeId) {\n return recoveriesPerNode.getOrDefault(nodeId, Recoveries.EMPTY).getOutgoing();\n }\n \n- private ShardRouting findPrimary(ShardRouting routing) {\n- List<ShardRouting> shardRoutings = assignedShards.get(routing.shardId());\n+ @Nullable\n+ private ShardRouting findAssignedPrimaryIfPeerRecovery(ShardRouting routing) {\n ShardRouting primary = null;\n- if (shardRoutings != null) {\n- for (ShardRouting shardRouting : shardRoutings) {\n- if (shardRouting.primary()) {\n- if (shardRouting.active()) {\n- return shardRouting;\n- } else if (primary == null) {\n- primary = shardRouting;\n- } else if (primary.relocatingNodeId() != null) {\n- primary = shardRouting;\n+ if (routing.isPeerRecovery()) {\n+ List<ShardRouting> shardRoutings = assignedShards.get(routing.shardId());\n+ if (shardRoutings != null) {\n+ for (ShardRouting shardRouting : shardRoutings) {\n+ if (shardRouting.primary()) {\n+ if (shardRouting.active()) {\n+ return shardRouting;\n+ } else if (primary == null) {\n+ primary = shardRouting;\n+ } else if (primary.relocatingNodeId() != null) 
{\n+ primary = shardRouting;\n+ }\n }\n }\n }\n@@ -500,7 +499,6 @@ public ShardRouting removeRelocationSource(ShardRouting shard) {\n ShardRouting relocationMarkerRemoved = shard.removeRelocationSource();\n updateAssigned(shard, relocationMarkerRemoved);\n inactiveShardCount++; // relocation targets are not counted as inactive shards whereas initializing shards are\n- Recoveries.getOrAdd(recoveriesPerNode, shard.relocatingNodeId()).addOutgoing(-1);\n return relocationMarkerRemoved;\n }\n \n@@ -856,20 +854,17 @@ public static boolean assertShardStats(RoutingNodes routingNodes) {\n for (ShardRouting routing : routingNode) {\n if (routing.initializing()) {\n incoming++;\n- } else if (routing.relocating()) {\n- outgoing++;\n }\n- if (routing.primary() && (routing.initializing() && routing.relocatingNodeId() != null) == false) { // we don't count the initialization end of the primary relocation\n- List<ShardRouting> shardRoutings = routingNodes.assignedShards.get(routing.shardId());\n- for (ShardRouting assigned : shardRoutings) {\n- if (assigned.primary() == false && assigned.initializing() && assigned.relocatingNodeId() == null) {\n+ if (routing.primary() && routing.isPeerRecovery() == false) {\n+ for (ShardRouting assigned : routingNodes.assignedShards.get(routing.shardId())) {\n+ if (assigned.isPeerRecovery()) {\n outgoing++;\n }\n }\n }\n }\n }\n- assert incoming == value.incoming : incoming + \" != \" + value.incoming;\n+ assert incoming == value.incoming : incoming + \" != \" + value.incoming + \" node: \" + routingNode;\n assert outgoing == value.outgoing : outgoing + \" != \" + value.outgoing + \" node: \" + routingNode;\n }\n ", "filename": "core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java", "status": "modified" }, { "diff": "@@ -28,6 +28,9 @@\n import org.elasticsearch.common.settings.Setting.Property;\n import org.elasticsearch.common.settings.Settings;\n \n+import static org.elasticsearch.cluster.routing.allocation.decider.Decision.THROTTLE;\n+import static org.elasticsearch.cluster.routing.allocation.decider.Decision.YES;\n+\n /**\n * {@link ThrottlingAllocationDecider} controls the recovery process per node in\n * the cluster. 
It exposes two settings via the cluster update API that allow\n@@ -109,50 +112,83 @@ private void setPrimariesInitialRecoveries(int primariesInitialRecoveries) {\n \n @Override\n public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {\n- if (shardRouting.primary()) {\n- assert shardRouting.unassigned() || shardRouting.active();\n- if (shardRouting.unassigned()) {\n- // primary is unassigned, means we are going to do recovery from gateway\n- // count *just the primary* currently doing recovery on the node and check against concurrent_recoveries\n- int primariesInRecovery = 0;\n- for (ShardRouting shard : node) {\n- // when a primary shard is INITIALIZING, it can be because of *initial recovery* or *relocation from another node*\n- // we only count initial recoveries here, so we need to make sure that relocating node is null\n- if (shard.initializing() && shard.primary() && shard.relocatingNodeId() == null) {\n- primariesInRecovery++;\n- }\n+ if (shardRouting.primary() && shardRouting.unassigned()) {\n+ assert initializingShard(shardRouting, node.nodeId()).isPeerRecovery() == false;\n+ // primary is unassigned, means we are going to do recovery from store, snapshot or local shards\n+ // count *just the primaries* currently doing recovery on the node and check against primariesInitialRecoveries\n+\n+ int primariesInRecovery = 0;\n+ for (ShardRouting shard : node) {\n+ // when a primary shard is INITIALIZING, it can be because of *initial recovery* or *relocation from another node*\n+ // we only count initial recoveries here, so we need to make sure that relocating node is null\n+ if (shard.initializing() && shard.primary() && shard.relocatingNodeId() == null) {\n+ primariesInRecovery++;\n+ }\n+ }\n+ if (primariesInRecovery >= primariesInitialRecoveries) {\n+ // TODO: Should index creation not be throttled for primary shards?\n+ return allocation.decision(THROTTLE, NAME, \"too many primaries are currently recovering [%d], limit: [%d]\",\n+ primariesInRecovery, primariesInitialRecoveries);\n+ } else {\n+ return allocation.decision(YES, NAME, \"below primary recovery limit of [%d]\", primariesInitialRecoveries);\n+ }\n+ } else {\n+ // Peer recovery\n+ assert initializingShard(shardRouting, node.nodeId()).isPeerRecovery();\n+\n+ // Allocating a shard to this node will increase the incoming recoveries\n+ int currentInRecoveries = allocation.routingNodes().getIncomingRecoveries(node.nodeId());\n+ if (currentInRecoveries >= concurrentIncomingRecoveries) {\n+ return allocation.decision(THROTTLE, NAME, \"too many incoming shards are currently recovering [%d], limit: [%d]\",\n+ currentInRecoveries, concurrentIncomingRecoveries);\n+ } else {\n+ // search for corresponding recovery source (= primary shard) and check number of outgoing recoveries on that node\n+ ShardRouting primaryShard = allocation.routingNodes().activePrimary(shardRouting.shardId());\n+ if (primaryShard == null) {\n+ return allocation.decision(Decision.NO, NAME, \"primary shard for this replica is not yet active\");\n }\n- if (primariesInRecovery >= primariesInitialRecoveries) {\n- return allocation.decision(Decision.THROTTLE, NAME, \"too many primaries are currently recovering [%d], limit: [%d]\",\n- primariesInRecovery, primariesInitialRecoveries);\n+ int primaryNodeOutRecoveries = allocation.routingNodes().getOutgoingRecoveries(primaryShard.currentNodeId());\n+ if (primaryNodeOutRecoveries >= concurrentOutgoingRecoveries) {\n+ return allocation.decision(THROTTLE, NAME, \"too many 
outgoing shards are currently recovering [%d], limit: [%d]\",\n+ primaryNodeOutRecoveries, concurrentOutgoingRecoveries);\n } else {\n- return allocation.decision(Decision.YES, NAME, \"below primary recovery limit of [%d]\", primariesInitialRecoveries);\n+ return allocation.decision(YES, NAME, \"below shard recovery limit of outgoing: [%d < %d] incoming: [%d < %d]\",\n+ primaryNodeOutRecoveries,\n+ concurrentOutgoingRecoveries,\n+ currentInRecoveries,\n+ concurrentIncomingRecoveries);\n }\n }\n }\n- // TODO should we allow shards not allocated post API to always allocate?\n- // either primary or replica doing recovery (from peer shard)\n-\n- // count the number of recoveries on the node, its for both target (INITIALIZING) and source (RELOCATING)\n- return canAllocate(node, allocation);\n }\n \n- @Override\n- public Decision canAllocate(RoutingNode node, RoutingAllocation allocation) {\n- int currentOutRecoveries = allocation.routingNodes().getOutgoingRecoveries(node.nodeId());\n- int currentInRecoveries = allocation.routingNodes().getIncomingRecoveries(node.nodeId());\n- if (currentOutRecoveries >= concurrentOutgoingRecoveries) {\n- return allocation.decision(Decision.THROTTLE, NAME, \"too many outgoing shards are currently recovering [%d], limit: [%d]\",\n- currentOutRecoveries, concurrentOutgoingRecoveries);\n- } else if (currentInRecoveries >= concurrentIncomingRecoveries) {\n- return allocation.decision(Decision.THROTTLE, NAME, \"too many incoming shards are currently recovering [%d], limit: [%d]\",\n- currentInRecoveries, concurrentIncomingRecoveries);\n- } else {\n- return allocation.decision(Decision.YES, NAME, \"below shard recovery limit of outgoing: [%d < %d] incoming: [%d < %d]\",\n- currentOutRecoveries,\n- concurrentOutgoingRecoveries,\n- currentInRecoveries,\n- concurrentIncomingRecoveries);\n+ /**\n+ * The shard routing passed to {@link #canAllocate(ShardRouting, RoutingNode, RoutingAllocation)} is not the initializing shard to this\n+ * node but:\n+ * - the unassigned shard routing in case if we want to assign an unassigned shard to this node.\n+ * - the initializing shard routing if we want to assign the initializing shard to this node instead\n+ * - the started shard routing in case if we want to check if we can relocate to this node.\n+ * - the relocating shard routing if we want to relocate to this node now instead.\n+ *\n+ * This method returns the corresponding initializing shard that would be allocated to this node.\n+ */\n+ private ShardRouting initializingShard(ShardRouting shardRouting, String currentNodeId) {\n+ final ShardRouting initializingShard;\n+ if (shardRouting.unassigned()) {\n+ initializingShard = shardRouting.initialize(currentNodeId, null, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);\n+ } else if (shardRouting.initializing()) {\n+ initializingShard = shardRouting.moveToUnassigned(shardRouting.unassignedInfo())\n+ .initialize(currentNodeId, null, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);\n+ } else if (shardRouting.relocating()) {\n+ initializingShard = shardRouting.cancelRelocation()\n+ .relocate(currentNodeId, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)\n+ .buildTargetRelocatingShard();\n+ } else {\n+ assert shardRouting.started();\n+ initializingShard = shardRouting.relocate(currentNodeId, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)\n+ .buildTargetRelocatingShard();\n }\n+ assert initializingShard.initializing();\n+ return initializingShard;\n }\n }", "filename": 
"core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java", "status": "modified" }, { "diff": "@@ -34,7 +34,7 @@ private RandomShardRoutingMutator() {\n public static ShardRouting randomChange(ShardRouting shardRouting, String[] nodes) {\n switch (randomInt(2)) {\n case 0:\n- if (shardRouting.unassigned() == false) {\n+ if (shardRouting.unassigned() == false && shardRouting.primary() == false) {\n shardRouting = shardRouting.moveToUnassigned(new UnassignedInfo(randomReason(), randomAsciiOfLength(10)));\n } else if (shardRouting.unassignedInfo() != null) {\n shardRouting = shardRouting.updateUnassignedInfo(new UnassignedInfo(randomReason(), randomAsciiOfLength(10)));", "filename": "core/src/test/java/org/elasticsearch/cluster/routing/RandomShardRoutingMutator.java", "status": "modified" }, { "diff": "@@ -54,7 +54,6 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase {\n * amount of iterations the test allows allocation unless the same shard is\n * already allocated on a node and balances the cluster to gain optimal\n * balance.*/\n- @AwaitsFix(bugUrl = \"https://github.com/elastic/elasticsearch/pull/18701\")\n public void testRandomDecisions() {\n RandomAllocationDecider randomAllocationDecider = new RandomAllocationDecider(random());\n AllocationService strategy = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY,", "filename": "core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java", "status": "modified" }, { "diff": "@@ -19,18 +19,22 @@\n \n package org.elasticsearch.cluster.routing.allocation;\n \n+import com.carrotsearch.hppc.IntHashSet;\n import org.elasticsearch.Version;\n import org.elasticsearch.cluster.ClusterState;\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.cluster.metadata.MetaData;\n import org.elasticsearch.cluster.node.DiscoveryNodes;\n+import org.elasticsearch.cluster.routing.RestoreSource;\n import org.elasticsearch.cluster.routing.RoutingTable;\n import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;\n import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;\n import org.elasticsearch.cluster.routing.allocation.decider.Decision;\n import org.elasticsearch.common.logging.ESLogger;\n import org.elasticsearch.common.logging.Loggers;\n import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.snapshots.Snapshot;\n+import org.elasticsearch.snapshots.SnapshotId;\n import org.elasticsearch.test.ESAllocationTestCase;\n \n import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;\n@@ -57,9 +61,7 @@ public void testPrimaryRecoveryThrottling() {\n .put(IndexMetaData.builder(\"test\").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(1))\n .build();\n \n- RoutingTable routingTable = RoutingTable.builder()\n- .addAsNew(metaData.index(\"test\"))\n- .build();\n+ RoutingTable routingTable = createRecoveryRoutingTable(metaData.index(\"test\"));\n \n ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();\n \n@@ -118,9 +120,7 @@ public void testReplicaAndPrimaryRecoveryThrottling() {\n .put(IndexMetaData.builder(\"test\").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))\n .build();\n \n- RoutingTable routingTable = RoutingTable.builder()\n- 
.addAsNew(metaData.index(\"test\"))\n- .build();\n+ RoutingTable routingTable = createRecoveryRoutingTable(metaData.index(\"test\"));\n \n ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();\n \n@@ -188,9 +188,7 @@ public void testThrottleIncomingAndOutgoing() {\n .put(IndexMetaData.builder(\"test\").settings(settings(Version.CURRENT)).numberOfShards(9).numberOfReplicas(0))\n .build();\n \n- RoutingTable routingTable = RoutingTable.builder()\n- .addAsNew(metaData.index(\"test\"))\n- .build();\n+ RoutingTable routingTable = createRecoveryRoutingTable(metaData.index(\"test\"));\n \n ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();\n \n@@ -242,89 +240,107 @@ public void testThrottleIncomingAndOutgoing() {\n assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node1\"), 1);\n }\n \n- public void testOutgoingThrottlesAllocaiton() {\n- Settings settings = Settings.builder()\n- .put(\"cluster.routing.allocation.node_concurrent_recoveries\", 1)\n- .put(\"cluster.routing.allocation.node_initial_primaries_recoveries\", 1)\n- .put(\"cluster.routing.allocation.cluster_concurrent_rebalance\", 1)\n- .build();\n- AllocationService strategy = createAllocationService(settings);\n+ public void testOutgoingThrottlesAllocation() {\n+ AllocationService strategy = createAllocationService(Settings.builder()\n+ .put(\"cluster.routing.allocation.node_concurrent_outgoing_recoveries\", 1)\n+ .build());\n+\n+ logger.info(\"Building initial routing table\");\n \n MetaData metaData = MetaData.builder()\n- .put(IndexMetaData.builder(\"test\").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(0))\n+ .put(IndexMetaData.builder(\"test\").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2))\n .build();\n \n- RoutingTable routingTable = RoutingTable.builder()\n- .addAsNew(metaData.index(\"test\"))\n- .build();\n+ RoutingTable routingTable = createRecoveryRoutingTable(metaData.index(\"test\"));\n \n ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();\n \n- clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode(\"node1\")).put(newNode(\"node2\")).put(newNode(\"node3\"))).build();\n+ logger.info(\"start one node, do reroute, only 1 should initialize\");\n+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode(\"node1\"))).build();\n routingTable = strategy.reroute(clusterState, \"reroute\").routingTable();\n clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n+\n assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(0));\n- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));\n- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0));\n- assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries(\"node1\"), 1);\n- assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries(\"node2\"), 1);\n- assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries(\"node3\"), 1);\n- assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node1\"), 0);\n- assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node2\"), 0);\n- 
assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node3\"), 0);\n+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(1));\n+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(2));\n \n+ logger.info(\"start initializing\");\n routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();\n clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n \n- assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries(\"node1\"), 0);\n- assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries(\"node2\"), 0);\n- assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries(\"node3\"), 0);\n- assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node1\"), 0);\n- assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node2\"), 0);\n- assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node3\"), 0);\n+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(1));\n+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0));\n+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(2));\n \n- RoutingAllocation.Result reroute = strategy.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(\"test\", clusterState.getRoutingNodes().node(\"node1\").iterator().next().shardId().id(), \"node1\", \"node2\")), false, false);\n- assertEquals(reroute.explanations().explanations().size(), 1);\n- assertEquals(reroute.explanations().explanations().get(0).decisions().type(), Decision.Type.YES);\n- routingTable = reroute.routingTable();\n+ logger.info(\"start one more node, first non-primary should start being allocated\");\n+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode(\"node2\"))).build();\n+ routingTable = strategy.reroute(clusterState, \"reroute\").routingTable();\n clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n- assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries(\"node1\"), 0);\n- assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries(\"node2\"), 1);\n- assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries(\"node3\"), 0);\n- assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node1\"), 1);\n- assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node2\"), 0);\n- assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node3\"), 0);\n \n- // outgoing throttles\n- reroute = strategy.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(\"test\", clusterState.getRoutingNodes().node(\"node3\").iterator().next().shardId().id(), \"node3\", \"node1\")), true, false);\n- assertEquals(reroute.explanations().explanations().size(), 1);\n- assertEquals(reroute.explanations().explanations().get(0).decisions().type(), Decision.Type.THROTTLE);\n- assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries(\"node1\"), 0);\n- assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries(\"node2\"), 1);\n- assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries(\"node3\"), 0);\n+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(1));\n+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(1));\n+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(1));\n 
assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node1\"), 1);\n- assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node2\"), 0);\n- assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node3\"), 0);\n+\n+ logger.info(\"start initializing non-primary\");\n+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();\n+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(2));\n+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0));\n+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(1));\n+ assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node1\"), 0);\n+\n+ logger.info(\"start one more node, initializing second non-primary\");\n+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode(\"node3\"))).build();\n+ routingTable = strategy.reroute(clusterState, \"reroute\").routingTable();\n+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n+\n assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(2));\n assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(1));\n- assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(1));\n assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0));\n+ assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node1\"), 1);\n+\n+ logger.info(\"start one more node\");\n+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode(\"node4\"))).build();\n+ routingTable = strategy.reroute(clusterState, \"reroute\").routingTable();\n+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n \n- // incoming throttles\n- reroute = strategy.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(\"test\", clusterState.getRoutingNodes().node(\"node3\").iterator().next().shardId().id(), \"node3\", \"node2\")), true, false);\n+ assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node1\"), 1);\n+\n+ logger.info(\"move started non-primary to new node\");\n+ RoutingAllocation.Result reroute = strategy.reroute(clusterState, new AllocationCommands(\n+ new MoveAllocationCommand(\"test\", 0, \"node2\", \"node4\")), true, false);\n assertEquals(reroute.explanations().explanations().size(), 1);\n assertEquals(reroute.explanations().explanations().get(0).decisions().type(), Decision.Type.THROTTLE);\n+ // even though it is throttled, move command still forces allocation\n \n- assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries(\"node1\"), 0);\n- assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries(\"node2\"), 1);\n- assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries(\"node3\"), 0);\n- assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node1\"), 1);\n- assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node2\"), 0);\n- assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node3\"), 0);\n- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(2));\n- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(1));\n+ clusterState = ClusterState.builder(clusterState).routingResult(reroute).build();\n+ routingTable = 
clusterState.routingTable();\n+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(1));\n assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(1));\n+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(2));\n assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0));\n+ assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node1\"), 2);\n+ assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node2\"), 0);\n+ }\n \n+ private RoutingTable createRecoveryRoutingTable(IndexMetaData indexMetaData) {\n+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();\n+ switch (randomInt(5)) {\n+ case 0: routingTableBuilder.addAsRecovery(indexMetaData); break;\n+ case 1: routingTableBuilder.addAsFromCloseToOpen(indexMetaData); break;\n+ case 2: routingTableBuilder.addAsFromDangling(indexMetaData); break;\n+ case 3: routingTableBuilder.addAsNewRestore(indexMetaData,\n+ new RestoreSource(new Snapshot(\"repo\", new SnapshotId(\"snap\", \"randomId\")), Version.CURRENT,\n+ indexMetaData.getIndex().getName()), new IntHashSet()); break;\n+ case 4: routingTableBuilder.addAsRestore(indexMetaData,\n+ new RestoreSource(new Snapshot(\"repo\", new SnapshotId(\"snap\", \"randomId\")), Version.CURRENT,\n+ indexMetaData.getIndex().getName())); break;\n+ case 5: routingTableBuilder.addAsNew(indexMetaData); break;\n+ default: throw new IndexOutOfBoundsException();\n+ }\n+\n+ return routingTableBuilder.build();\n }\n+\n }", "filename": "core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java", "status": "modified" } ] }
{ "body": "Index deletion requests currently use a custom acknowledgement mechanism that wait for the data nodes to actually delete the data before acknowledging the request to the client. This was initially put into place as a new index with same name could only be created if the old index was wiped as we used the index name as data folder on the data nodes. With PR #16442, we now use the index uuid as folder name which avoids collision between indices that are named the same (deleted and recreated). This allows us to get rid of the custom acknowledgment mechanism altogether and rely on the standard cluster state-based acknowledgment instead.\n\nCloses #18558\n", "comments": [ { "body": "@ywelsch and I talked about it offline, discussing the fact that this change uses two ack mechanism layered on top of each other (with two futures, two timeouts etc.) which makes things complex. We are currently evaluating whether specific index store deletion acks are needed now that we have uuids as folder names and based on that decide whether we want to invest more time here or just move to the standard cluster state based ack-ing.\n", "created_at": "2016-05-30T09:09:03Z" }, { "body": "@bleskes I've updated the PR by removing the custom ack mechanism and relying only on the standard cluster state ack.\n", "created_at": "2016-05-31T16:44:37Z" }, { "body": "LGTM. Best stats ever. Can you update the PR description/title? \n", "created_at": "2016-06-01T12:06:08Z" }, { "body": "Thanks @bleskes! I've updated title/description.\n", "created_at": "2016-06-01T13:25:56Z" } ], "number": 18602, "title": "Acknowledge index deletion requests based on standard cluster state acknowledgment" }
{ "body": "It was lost in #18602\n", "number": 18698, "review_comments": [ { "body": "I know previous behavior was to spawn a thread every time. I think it will be cleaner to return a boolean from the deletion methods to say whether the index deletion was succesful or is marked as pending and only spawn the thread for the latter.\n", "created_at": "2016-06-05T18:36:13Z" }, { "body": "can we try first without this extra counting and only add it if it's needed? or did you already see evidence for a failure without it?\n", "created_at": "2016-06-05T18:38:55Z" }, { "body": "Agree this would be a good thing to do. As this touches a lot more places in the code, I will do a follow-up.\n", "created_at": "2016-06-06T11:03:58Z" }, { "body": "yes, this makes all the difference on Windows (tested it in a VM) where pending deletes actually delete some more stuff, in contrast to Linux where we have everything deleted but nevertheless schedule a pending delete as we run into FileNotFoundException when doing IOUtils.rm(...) due to concurrent deletions of same data.\n", "created_at": "2016-06-06T11:10:04Z" }, { "body": "😢 OK.. can we add comments on this and the corresponding public method to say we need it for testing and why?\n", "created_at": "2016-06-06T12:41:24Z" } ], "title": "Add back pending deletes" }
{ "commits": [ { "message": "Add back pending deletes\n\nTriggering the pending deletes logic was accidentally removed in the clean up PR #18602." } ], "files": [ { "diff": "@@ -112,6 +112,7 @@\n import java.util.concurrent.Executors;\n import java.util.concurrent.TimeUnit;\n import java.util.concurrent.atomic.AtomicBoolean;\n+import java.util.concurrent.atomic.AtomicInteger;\n import java.util.function.Predicate;\n import java.util.stream.Collectors;\n \n@@ -141,6 +142,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i\n private final CircuitBreakerService circuitBreakerService;\n private volatile Map<String, IndexService> indices = emptyMap();\n private final Map<Index, List<PendingDelete>> pendingDeletes = new HashMap<>();\n+ private final AtomicInteger numUncompletedDeletes = new AtomicInteger();\n private final OldShardsStats oldShardsStats = new OldShardsStats();\n private final IndexStoreConfig indexStoreConfig;\n private final MapperRegistry mapperRegistry;\n@@ -782,6 +784,7 @@ private void addPendingDelete(Index index, PendingDelete pendingDelete) {\n pendingDeletes.put(index, list);\n }\n list.add(pendingDelete);\n+ numUncompletedDeletes.incrementAndGet();\n }\n }\n \n@@ -840,6 +843,7 @@ public void processPendingDeletes(Index index, IndexSettings indexSettings, Time\n logger.debug(\"{} processing pending deletes\", index);\n final long startTimeNS = System.nanoTime();\n final List<ShardLock> shardLocks = nodeEnv.lockAllForIndex(index, indexSettings, timeout.millis());\n+ int numRemoved = 0;\n try {\n Map<ShardId, ShardLock> locks = new HashMap<>();\n for (ShardLock lock : shardLocks) {\n@@ -850,6 +854,7 @@ public void processPendingDeletes(Index index, IndexSettings indexSettings, Time\n remove = pendingDeletes.remove(index);\n }\n if (remove != null && remove.isEmpty() == false) {\n+ numRemoved = remove.size();\n CollectionUtil.timSort(remove); // make sure we delete indices first\n final long maxSleepTimeMs = 10 * 1000; // ensure we retry after 10 sec\n long sleepTime = 10;\n@@ -896,6 +901,10 @@ public void processPendingDeletes(Index index, IndexSettings indexSettings, Time\n }\n } finally {\n IOUtils.close(shardLocks);\n+ if (numRemoved > 0) {\n+ int remainingUncompletedDeletes = numUncompletedDeletes.addAndGet(-numRemoved);\n+ assert remainingUncompletedDeletes >= 0;\n+ }\n }\n }\n \n@@ -909,6 +918,14 @@ int numPendingDeletes(Index index) {\n }\n }\n \n+ /**\n+ * Checks if all pending deletes have completed. 
Used by tests to ensure we don't check directory contents while deletion still ongoing.\n+ * The reason is that, on Windows, browsing the directory contents can interfere with the deletion process and delay it unnecessarily.\n+ */\n+ public boolean hasUncompletedPendingDeletes() {\n+ return numUncompletedDeletes.get() > 0;\n+ }\n+\n /**\n * Returns this nodes {@link IndicesQueriesRegistry}\n */", "filename": "core/src/main/java/org/elasticsearch/indices/IndicesService.java", "status": "modified" }, { "diff": "@@ -20,6 +20,7 @@\n package org.elasticsearch.indices.cluster;\n \n import com.carrotsearch.hppc.cursors.ObjectCursor;\n+import org.apache.lucene.store.LockObtainFailedException;\n import org.elasticsearch.ElasticsearchException;\n import org.elasticsearch.cluster.ClusterChangedEvent;\n import org.elasticsearch.cluster.ClusterState;\n@@ -41,11 +42,14 @@\n import org.elasticsearch.common.logging.ESLogger;\n import org.elasticsearch.common.lucene.Lucene;\n import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.unit.TimeValue;\n import org.elasticsearch.common.util.Callback;\n+import org.elasticsearch.common.util.concurrent.AbstractRunnable;\n import org.elasticsearch.common.util.concurrent.ConcurrentCollections;\n import org.elasticsearch.gateway.GatewayService;\n import org.elasticsearch.index.Index;\n import org.elasticsearch.index.IndexService;\n+import org.elasticsearch.index.IndexSettings;\n import org.elasticsearch.index.IndexShardAlreadyExistsException;\n import org.elasticsearch.index.NodeServicesProvider;\n import org.elasticsearch.index.mapper.DocumentMapper;\n@@ -67,14 +71,15 @@\n import org.elasticsearch.threadpool.ThreadPool;\n \n import java.io.IOException;\n-import java.util.ArrayList;\n import java.util.Arrays;\n import java.util.HashSet;\n import java.util.Iterator;\n import java.util.List;\n import java.util.Map;\n import java.util.Set;\n import java.util.concurrent.ConcurrentMap;\n+import java.util.concurrent.TimeUnit;\n+import java.util.concurrent.atomic.AtomicInteger;\n \n /**\n *\n@@ -213,11 +218,14 @@ private void applyDeletedIndices(final ClusterChangedEvent event) {\n logger.debug(\"[{}] cleaning index, no longer part of the metadata\", index);\n }\n final IndexService idxService = indicesService.indexService(index);\n+ final IndexSettings indexSettings;\n if (idxService != null) {\n+ indexSettings = idxService.getIndexSettings();\n deleteIndex(index, \"index no longer part of the metadata\");\n } else if (previousState.metaData().hasIndex(index.getName())) {\n // The deleted index was part of the previous cluster state, but not loaded on the local node\n final IndexMetaData metaData = previousState.metaData().index(index);\n+ indexSettings = new IndexSettings(metaData, settings);\n indicesService.deleteUnassignedIndex(\"deleted index was not assigned to local node\", metaData, event.state());\n } else {\n // The previous cluster state's metadata also does not contain the index,\n@@ -227,7 +235,35 @@ private void applyDeletedIndices(final ClusterChangedEvent event) {\n // First, though, verify the precondition for applying this case by\n // asserting that the previous cluster state is not initialized/recovered.\n assert previousState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK);\n- indicesService.verifyIndexIsDeleted(index, event.state());\n+ final IndexMetaData metaData = indicesService.verifyIndexIsDeleted(index, event.state());\n+ if (metaData != null) {\n+ indexSettings = new IndexSettings(metaData, 
settings);\n+ } else {\n+ indexSettings = null;\n+ }\n+ }\n+ if (indexSettings != null) {\n+ threadPool.generic().execute(new AbstractRunnable() {\n+ @Override\n+ public void onFailure(Throwable t) {\n+ logger.warn(\"[{}] failed to complete pending deletion for index\", t, index);\n+ }\n+\n+ @Override\n+ protected void doRun() throws Exception {\n+ try {\n+ // we are waiting until we can lock the index / all shards on the node and then we ack the delete of the store to the\n+ // master. If we can't acquire the locks here immediately there might be a shard of this index still holding on to the lock\n+ // due to a \"currently canceled recovery\" or so. The shard will delete itself BEFORE the lock is released so it's guaranteed to be\n+ // deleted by the time we get the lock\n+ indicesService.processPendingDeletes(index, indexSettings, new TimeValue(30, TimeUnit.MINUTES));\n+ } catch (LockObtainFailedException exc) {\n+ logger.warn(\"[{}] failed to lock all shards for index - timed out after 30 seconds\", index);\n+ } catch (InterruptedException e) {\n+ logger.warn(\"[{}] failed to lock all shards for index - interrupted\", index);\n+ }\n+ }\n+ });\n }\n }\n ", "filename": "core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java", "status": "modified" }, { "diff": "@@ -70,7 +70,6 @@\n import java.util.concurrent.CopyOnWriteArrayList;\n import java.util.concurrent.CountDownLatch;\n import java.util.concurrent.ExecutionException;\n-import java.util.concurrent.TimeUnit;\n import java.util.concurrent.atomic.AtomicBoolean;\n import java.util.concurrent.atomic.AtomicInteger;\n \n@@ -586,8 +585,8 @@ public void testIndexWithShadowReplicasCleansUp() throws Exception {\n \n logger.info(\"--> deleting index \" + IDX);\n assertAcked(client().admin().indices().prepareDelete(IDX));\n-\n- // assertBusy(() -> assertPathHasBeenCleared(dataPath), 1, TimeUnit.MINUTES);\n+ assertAllIndicesRemovedAndDeletionCompleted(internalCluster().getInstances(IndicesService.class));\n+ assertPathHasBeenCleared(dataPath);\n //norelease\n //TODO: uncomment the test below when https://github.com/elastic/elasticsearch/issues/17695 is resolved.\n //assertIndicesDirsDeleted(nodes);\n@@ -647,8 +646,8 @@ public void run() {\n assertHitCount(resp, docCount);\n \n assertAcked(client().admin().indices().prepareDelete(IDX));\n-\n- // assertBusy(() -> assertPathHasBeenCleared(dataPath), 1, TimeUnit.MINUTES);\n+ assertAllIndicesRemovedAndDeletionCompleted(internalCluster().getInstances(IndicesService.class));\n+ assertPathHasBeenCleared(dataPath);\n //norelease\n //TODO: uncomment the test below when https://github.com/elastic/elasticsearch/issues/17695 is resolved.\n //assertIndicesDirsDeleted(nodes);\n@@ -839,8 +838,8 @@ public void testDeletingClosedIndexRemovesFiles() throws Exception {\n \n logger.info(\"--> deleting closed index\");\n client().admin().indices().prepareDelete(IDX).get();\n-\n- assertBusy(() -> assertPathHasBeenCleared(dataPath), 1, TimeUnit.MINUTES);\n+ assertAllIndicesRemovedAndDeletionCompleted(internalCluster().getInstances(IndicesService.class));\n+ assertPathHasBeenCleared(dataPath);\n assertIndicesDirsDeleted(nodes);\n }\n ", "filename": "core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java", "status": "modified" }, { "diff": "@@ -556,7 +556,8 @@ public void testIndexDirIsDeletedWhenShardRemoved() throws Exception {\n SearchResponse response = client().prepareSearch(\"test\").get();\n assertHitCount(response, 1L);\n 
client().admin().indices().prepareDelete(\"test\").get();\n- assertBusyPathHasBeenCleared(idxPath);\n+ assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class)));\n+ assertPathHasBeenCleared(idxPath);\n }\n \n public void testExpectedShardSizeIsPresent() throws InterruptedException {\n@@ -639,8 +640,9 @@ public void testIndexCanChangeCustomDataPath() throws Exception {\n assertThat(\"found the hit\", resp.getHits().getTotalHits(), equalTo(1L));\n \n assertAcked(client().admin().indices().prepareDelete(INDEX));\n- assertBusyPathHasBeenCleared(startDir.toAbsolutePath());\n- assertBusyPathHasBeenCleared(endDir.toAbsolutePath());\n+ assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class)));\n+ assertPathHasBeenCleared(startDir.toAbsolutePath());\n+ assertPathHasBeenCleared(endDir.toAbsolutePath());\n }\n \n public void testShardStats() throws IOException {", "filename": "core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java", "status": "modified" }, { "diff": "@@ -190,20 +190,24 @@ public void testPendingTasks() throws Exception {\n assertTrue(path.exists());\n \n assertEquals(indicesService.numPendingDeletes(test.index()), numPending);\n+ assertTrue(indicesService.hasUncompletedPendingDeletes());\n \n // shard lock released... we can now delete\n indicesService.processPendingDeletes(test.index(), test.getIndexSettings(), new TimeValue(0, TimeUnit.MILLISECONDS));\n assertEquals(indicesService.numPendingDeletes(test.index()), 0);\n+ assertFalse(indicesService.hasUncompletedPendingDeletes());\n assertFalse(path.exists());\n \n if (randomBoolean()) {\n indicesService.addPendingDelete(new ShardId(test.index(), 0), test.getIndexSettings());\n indicesService.addPendingDelete(new ShardId(test.index(), 1), test.getIndexSettings());\n indicesService.addPendingDelete(new ShardId(\"bogus\", \"_na_\", 1), test.getIndexSettings());\n assertEquals(indicesService.numPendingDeletes(test.index()), 2);\n+ assertTrue(indicesService.hasUncompletedPendingDeletes());\n // shard lock released... 
we can now delete\n indicesService.processPendingDeletes(test.index(), test.getIndexSettings(), new TimeValue(0, TimeUnit.MILLISECONDS));\n assertEquals(indicesService.numPendingDeletes(test.index()), 0);\n+ assertFalse(indicesService.hasUncompletedPendingDeletes());\n }\n assertAcked(client().admin().indices().prepareOpen(\"test\"));\n ", "filename": "core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java", "status": "modified" }, { "diff": "@@ -56,6 +56,7 @@\n import org.elasticsearch.env.NodeEnvironment;\n import org.elasticsearch.index.Index;\n import org.elasticsearch.index.analysis.AnalysisService;\n+import org.elasticsearch.indices.IndicesService;\n import org.elasticsearch.indices.analysis.AnalysisModule;\n import org.elasticsearch.search.MockSearchService;\n import org.elasticsearch.test.junit.listeners.LoggingListener;\n@@ -672,11 +673,11 @@ public static boolean assertionsEnabled() {\n return enabled;\n }\n \n- /**\n- * Asserts busily that there are no files in the specified path\n- */\n- public void assertBusyPathHasBeenCleared(Path path) throws Exception {\n- assertBusy(() -> assertPathHasBeenCleared(path));\n+ public void assertAllIndicesRemovedAndDeletionCompleted(Iterable<IndicesService> indicesServices) throws Exception {\n+ for (IndicesService indicesService : indicesServices) {\n+ assertBusy(() -> assertFalse(indicesService.iterator().hasNext()), 1, TimeUnit.MINUTES);\n+ assertBusy(() -> assertFalse(indicesService.hasUncompletedPendingDeletes()), 1, TimeUnit.MINUTES);\n+ }\n }\n \n /**", "filename": "test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java", "status": "modified" } ] }
{ "body": "<!--\nGitHub is reserved for bug reports and feature requests. The best place\nto ask a general question is at the Elastic Discourse forums at\nhttps://discuss.elastic.co. If you are in fact posting a bug report or\na feature request, please include one and only one of the below blocks\nin your new issue.\n-->\n\n<!--\nIf you are filing a bug report, please remove the below feature\nrequest block and provide responses for all of the below items.\n-->\n\n**Elasticsearch version**: 5.0.0-alpha2\n\n**JVM version**: 1.8.0_91(1.8.0_91-b14)\n\n**OS version**: Red Hat Enterprise Linux 7.2 (3.10.0-327.18.2.el7.x86_64)\n\n**Description of the problem including expected versus actual behavior**:\nI am trying to use AWS IAM role with Elasticsearch 5.0.0-alpha2 and EC2 discovery plug-in, but it does not seem to be working and I am getting below error:\n\n> \"Exception while retrieving instance list from AWS API: Authorization header or parameters are not formatted correctly. (Service: AmazonEC2; Status Code: 401; Error Code: AuthFailure\"\n\nI am using below configuration with jdk8:\n\n> cluster.name: \"test-cluster\"\n> cloud.aws.region: \"us-west-2\"\n> cloud.aws.ec2.region: \"us-west-2\"\n> cloud.aws.ec2.protocol: \"http\"\n> discovery.type: \"ec2\"\n> # bootstrap.mlockall: true\n> \n> node.master: true\n> node.data: false\n> node.name: ${HOSTNAME}-Master\n> discovery.zen.minimum_master_nodes: 1\n> network.host: ec2:privateIp\n> discovery.ec2.any_group: true\n> discovery.ec2.groups : sg-9d856tfe\n\nAnd, below is IAM role permission that I have configured with elasticsearch instance:\n\n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Action\": [\n \"ec2:Describe*\"\n ],\n \"Effect\": \"Allow\",\n \"Resource\": [\n \"*\"\n ]\n }\n ]\n}\n```\n\nHowever, same configuration works fine with with Elasticsearch 2.3 version. Has anyone faced the same issue with the latest Elasticsearch version 5.0.0-alpha2?\n\nAlso, today I enabled the debug mode of AWS call and I could notice(see below) that its loading credentials from **StaticCredentialsProvider** - its wrong behavior. As access and secret key is absent in the config(elasticsearch.yml) file, so ideally it should load credential from **InstanceProfileCredentialsProvider**.\n\n> [DEBUG][com.amazonaws.auth.AWSCredentialsProviderChain] Loading credentials from com.amazonaws.internal.StaticCredentialsProvider@40bf7b26\n\n**Steps to reproduce**:\n1. Start elasticsearch master data node with the above mentioned configuration.\n\n**Provide logs (if relevant)**:\n\n```\ncom.amazonaws.AmazonServiceException: Authorization header or parameters are not formatted correctly. 
(Service: AmazonEC2; Status Code: 401; Error Code: AuthFailure; Request ID: )\n at com.amazonaws.http.AmazonHttpClient.handleErrorResponse(AmazonHttpClient.java:1239)\n at com.amazonaws.http.AmazonHttpClient.executeOneRequest(AmazonHttpClient.java:823)\n at com.amazonaws.http.AmazonHttpClient.executeHelper(AmazonHttpClient.java:506)\n at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:318)\n at com.amazonaws.services.ec2.AmazonEC2Client.invoke(AmazonEC2Client.java:11901)\n at com.amazonaws.services.ec2.AmazonEC2Client.describeInstances(AmazonEC2Client.java:5940)\n at org.elasticsearch.discovery.ec2.AwsEc2UnicastHostsProvider.fetchDynamicNodes(AwsEc2UnicastHostsProvider.java:117)\n at org.elasticsearch.discovery.ec2.AwsEc2UnicastHostsProvider$DiscoNodesCache.refresh(AwsEc2UnicastHostsProvider.java:232)\n at org.elasticsearch.discovery.ec2.AwsEc2UnicastHostsProvider$DiscoNodesCache.refresh(AwsEc2UnicastHostsProvider.java:217)\n at org.elasticsearch.common.util.SingleObjectCache.getOrRefresh(SingleObjectCache.java:54)\n at org.elasticsearch.discovery.ec2.AwsEc2UnicastHostsProvider.buildDynamicNodes(AwsEc2UnicastHostsProvider.java:103)\n at org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing.sendPings(UnicastZenPing.java:344)\n at org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing.ping(UnicastZenPing.java:249)\n at org.elasticsearch.discovery.zen.ping.ZenPingService.ping(ZenPingService.java:106)\n at org.elasticsearch.discovery.zen.ping.ZenPingService.pingAndWait(ZenPingService.java:84)\n at org.elasticsearch.discovery.zen.ZenDiscovery.findMaster(ZenDiscovery.java:845)\n at org.elasticsearch.discovery.zen.ZenDiscovery.innerJoinCluster(ZenDiscovery.java:376)\n at org.elasticsearch.discovery.zen.ZenDiscovery.access$4500(ZenDiscovery.java:89)\n at org.elasticsearch.discovery.zen.ZenDiscovery$JoinThreadControl$1.run(ZenDiscovery.java:1166)\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:392)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\n```\n", "comments": [ { "body": "Thanks Alex for fixing this issue. Looks like that was the issue even though key was empty, but not null, it was going to else flow and looking for credentials in **StaticCredentialsProvider**.\n\nLet me re-install the **discovery-ec2** plug-in and test this. I will let you know, if any issue. Thanks again.\n", "created_at": "2016-05-31T16:17:34Z" }, { "body": "Hi Alex,\nHow can I test your fix? I have removed the discovery-ec2 plug-in and installed again(as shown below), but getting the same error as above.\n\n> bin/elasticsearch-plugin remove discovery-ec2\n> bin/elasticsearch-plugin install discovery-ec2\n\nCan you please help me here so that I can get the latest change w.r.t. discovery-ec2 plug-in? Thanks.\n", "created_at": "2016-05-31T16:40:10Z" }, { "body": "Thanks a lot @randhirkr. I added you to the pioneer program.\n", "created_at": "2016-05-31T17:18:48Z" }, { "body": "@dadoonet do we have to be added to the pioneer program to get these fixes? If so, I'd like to get on too as I'm facing this issue as well. Also, what additional configuration will I have to setup WRT the pioneer program? Thanks. \n", "created_at": "2016-07-14T20:08:17Z" }, { "body": "The PR has not been merged yet so this is not fixed. 
:(\n", "created_at": "2016-07-14T20:43:58Z" }, { "body": "@dexterous see https://www.elastic.co/blog/elastic-pioneer-program\n", "created_at": "2016-07-15T08:45:14Z" }, { "body": "@randhirkr I am having the exact same problem, what is the fix.\n", "created_at": "2016-11-04T14:39:15Z" } ], "number": 18652, "title": "AWS IAM role not working with Elasticsearch 5.0.0-alpha2, but works with 2.3 version" }
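The root cause reported in this issue is that an absent access key/secret is parsed as an empty string rather than null, so the old null check wrongly selected the static credentials provider. A simplified sketch of the corrected selection logic, using the AWS SDK v1 classes referenced in the diff below (and returning the static provider directly rather than wrapping it in a chain, for brevity), might look like this:

```java
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSCredentialsProviderChain;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.EnvironmentVariableCredentialsProvider;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
import com.amazonaws.auth.SystemPropertiesCredentialsProvider;
import com.amazonaws.internal.StaticCredentialsProvider;

// Illustrative helper: fall back to the SDK provider chain (env vars, system
// properties, EC2 instance profile / IAM role) when no explicit key/secret is
// configured. Settings parsing yields "" rather than null for absent keys,
// which is why the check must be isEmpty() and not a null comparison.
final class Ec2CredentialsSketch {
    static AWSCredentialsProvider build(String key, String secret) {
        if (key.isEmpty() && secret.isEmpty()) {
            return new AWSCredentialsProviderChain(
                new EnvironmentVariableCredentialsProvider(),
                new SystemPropertiesCredentialsProvider(),
                new InstanceProfileCredentialsProvider());
        }
        return new StaticCredentialsProvider(new BasicAWSCredentials(key, secret));
    }
}
```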
{ "body": "Fix EC2 discovery setting\n\nCloses #18652\n\nFollow up for #18662\n\nWe add some tests to check that settings are correctly applied.\nTests revealed that some checks were missing.\n\nAnother PR will come after for S3 repositories but it's a bit more complex for repositories.\n", "number": 18690, "review_comments": [ { "body": "Same concern about reproducibility as in the other PR.\n", "created_at": "2016-06-14T22:11:53Z" }, { "body": "You are throwing away the stack trace here. Just have this method throw Exception, and the tests that call it as well.\n", "created_at": "2016-07-22T21:42:05Z" }, { "body": "You can use expectThrows()\n", "created_at": "2016-07-22T21:42:33Z" } ], "title": "Fix EC2 discovery settings" }
{ "commits": [ { "message": "Fix EC2 discovery setting\n\nCloses #18652" }, { "message": "Fix ec2 settings\n\nFollow up for #18662\n\nWe add some tests to check that settings are correctly applied.\nTests revealed that some checks were missing.\n\nBut we ignore `testAWSCredentialsWithSystemProviders` test for now." } ], "files": [ { "diff": "@@ -44,7 +44,11 @@ dependencyLicenses {\n \n test {\n // this is needed for insecure plugins, remove if possible!\n- systemProperty 'tests.artifact', project.name \n+ systemProperty 'tests.artifact', project.name\n+ // this could be needed by AwsEc2ServiceImplTests#testAWSCredentialsWithSystemProviders()\n+ // As it's marked as Ignored for now, we can comment those\n+ // systemProperty 'aws.accessKeyId', 'DUMMY_ACCESS_KEY'\n+ // systemProperty 'aws.secretKey', 'DUMMY_SECRET_KEY'\n }\n \n thirdPartyAudit.excludes = [", "filename": "plugins/discovery-ec2/build.gradle", "status": "modified" }, { "diff": "@@ -39,6 +39,7 @@\n import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.component.AbstractLifecycleComponent;\n import org.elasticsearch.common.inject.Inject;\n+import org.elasticsearch.common.logging.ESLogger;\n import org.elasticsearch.common.network.NetworkService;\n import org.elasticsearch.common.settings.Settings;\n \n@@ -66,15 +67,45 @@ public synchronized AmazonEC2 client() {\n return client;\n }\n \n+ this.client = new AmazonEC2Client(buildCredentials(logger, settings), buildConfiguration(logger, settings));\n+ String endpoint = findEndpoint(logger, settings);\n+ if (endpoint != null) {\n+ client.setEndpoint(endpoint);\n+ }\n+\n+ return this.client;\n+ }\n+\n+ protected static AWSCredentialsProvider buildCredentials(ESLogger logger, Settings settings) {\n+ AWSCredentialsProvider credentials;\n+\n+ String key = CLOUD_EC2.KEY_SETTING.get(settings);\n+ String secret = CLOUD_EC2.SECRET_SETTING.get(settings);\n+ if (key.isEmpty() && secret.isEmpty()) {\n+ logger.debug(\"Using either environment variables, system properties or instance profile credentials\");\n+ credentials = new AWSCredentialsProviderChain(\n+ new EnvironmentVariableCredentialsProvider(),\n+ new SystemPropertiesCredentialsProvider(),\n+ new InstanceProfileCredentialsProvider()\n+ );\n+ } else {\n+ logger.debug(\"Using basic key/secret credentials\");\n+ credentials = new AWSCredentialsProviderChain(\n+ new StaticCredentialsProvider(new BasicAWSCredentials(key, secret))\n+ );\n+ }\n+\n+ return credentials;\n+ }\n+\n+ protected static ClientConfiguration buildConfiguration(ESLogger logger, Settings settings) {\n ClientConfiguration clientConfiguration = new ClientConfiguration();\n // the response metadata cache is only there for diagnostics purposes,\n // but can force objects from every response to the old generation.\n clientConfiguration.setResponseMetadataCacheSize(0);\n clientConfiguration.setProtocol(CLOUD_EC2.PROTOCOL_SETTING.get(settings));\n- String key = CLOUD_EC2.KEY_SETTING.get(settings);\n- String secret = CLOUD_EC2.SECRET_SETTING.get(settings);\n \n- if (CLOUD_EC2.PROXY_HOST_SETTING.exists(settings)) {\n+ if (PROXY_HOST_SETTING.exists(settings) || CLOUD_EC2.PROXY_HOST_SETTING.exists(settings)) {\n String proxyHost = CLOUD_EC2.PROXY_HOST_SETTING.get(settings);\n Integer proxyPort = CLOUD_EC2.PROXY_PORT_SETTING.get(settings);\n String proxyUsername = CLOUD_EC2.PROXY_USERNAME_SETTING.get(settings);\n@@ -97,78 +128,86 @@ public synchronized AmazonEC2 client() {\n // Increase the number of retries in case of 5xx API responses\n final Random rand = 
Randomness.get();\n RetryPolicy retryPolicy = new RetryPolicy(\n- RetryPolicy.RetryCondition.NO_RETRY_CONDITION,\n- new RetryPolicy.BackoffStrategy() {\n- @Override\n- public long delayBeforeNextRetry(AmazonWebServiceRequest originalRequest,\n- AmazonClientException exception,\n- int retriesAttempted) {\n- // with 10 retries the max delay time is 320s/320000ms (10 * 2^5 * 1 * 1000)\n- logger.warn(\"EC2 API request failed, retry again. Reason was:\", exception);\n- return 1000L * (long) (10d * Math.pow(2, retriesAttempted / 2.0d) * (1.0d + rand.nextDouble()));\n- }\n- },\n- 10,\n- false);\n+ RetryPolicy.RetryCondition.NO_RETRY_CONDITION,\n+ new RetryPolicy.BackoffStrategy() {\n+ @Override\n+ public long delayBeforeNextRetry(AmazonWebServiceRequest originalRequest,\n+ AmazonClientException exception,\n+ int retriesAttempted) {\n+ // with 10 retries the max delay time is 320s/320000ms (10 * 2^5 * 1 * 1000)\n+ logger.warn(\"EC2 API request failed, retry again. Reason was:\", exception);\n+ return 1000L * (long) (10d * Math.pow(2, retriesAttempted / 2.0d) * (1.0d + rand.nextDouble()));\n+ }\n+ },\n+ 10,\n+ false);\n clientConfiguration.setRetryPolicy(retryPolicy);\n \n- AWSCredentialsProvider credentials;\n-\n- if (key == null && secret == null) {\n- credentials = new AWSCredentialsProviderChain(\n- new EnvironmentVariableCredentialsProvider(),\n- new SystemPropertiesCredentialsProvider(),\n- new InstanceProfileCredentialsProvider()\n- );\n- } else {\n- credentials = new AWSCredentialsProviderChain(\n- new StaticCredentialsProvider(new BasicAWSCredentials(key, secret))\n- );\n- }\n-\n- this.client = new AmazonEC2Client(credentials, clientConfiguration);\n+ return clientConfiguration;\n+ }\n \n+ protected static String findEndpoint(ESLogger logger, Settings settings) {\n+ String endpoint = null;\n if (CLOUD_EC2.ENDPOINT_SETTING.exists(settings)) {\n- final String endpoint = CLOUD_EC2.ENDPOINT_SETTING.get(settings);\n+ endpoint = CLOUD_EC2.ENDPOINT_SETTING.get(settings);\n logger.debug(\"using explicit ec2 endpoint [{}]\", endpoint);\n- client.setEndpoint(endpoint);\n- } else if (CLOUD_EC2.REGION_SETTING.exists(settings)) {\n+ } else if (REGION_SETTING.exists(settings) || CLOUD_EC2.REGION_SETTING.exists(settings)) {\n final String region = CLOUD_EC2.REGION_SETTING.get(settings);\n- final String endpoint;\n- if (region.equals(\"us-east-1\") || region.equals(\"us-east\")) {\n- endpoint = \"ec2.us-east-1.amazonaws.com\";\n- } else if (region.equals(\"us-west\") || region.equals(\"us-west-1\")) {\n- endpoint = \"ec2.us-west-1.amazonaws.com\";\n- } else if (region.equals(\"us-west-2\")) {\n- endpoint = \"ec2.us-west-2.amazonaws.com\";\n- } else if (region.equals(\"ap-southeast\") || region.equals(\"ap-southeast-1\")) {\n- endpoint = \"ec2.ap-southeast-1.amazonaws.com\";\n- } else if (region.equals(\"us-gov-west\") || region.equals(\"us-gov-west-1\")) {\n- endpoint = \"ec2.us-gov-west-1.amazonaws.com\";\n- } else if (region.equals(\"ap-south-1\")) {\n- endpoint = \"ec2.ap-south-1.amazonaws.com\";\n- } else if (region.equals(\"ap-southeast-2\")) {\n- endpoint = \"ec2.ap-southeast-2.amazonaws.com\";\n- } else if (region.equals(\"ap-northeast\") || region.equals(\"ap-northeast-1\")) {\n- endpoint = \"ec2.ap-northeast-1.amazonaws.com\";\n- } else if (region.equals(\"ap-northeast-2\")) {\n- endpoint = \"ec2.ap-northeast-2.amazonaws.com\";\n- } else if (region.equals(\"eu-west\") || region.equals(\"eu-west-1\")) {\n- endpoint = \"ec2.eu-west-1.amazonaws.com\";\n- } else if (region.equals(\"eu-central\") || 
region.equals(\"eu-central-1\")) {\n- endpoint = \"ec2.eu-central-1.amazonaws.com\";\n- } else if (region.equals(\"sa-east\") || region.equals(\"sa-east-1\")) {\n- endpoint = \"ec2.sa-east-1.amazonaws.com\";\n- } else if (region.equals(\"cn-north\") || region.equals(\"cn-north-1\")) {\n- endpoint = \"ec2.cn-north-1.amazonaws.com.cn\";\n- } else {\n- throw new IllegalArgumentException(\"No automatic endpoint could be derived from region [\" + region + \"]\");\n+ switch (region) {\n+ case \"us-east-1\":\n+ case \"us-east\":\n+ endpoint = \"ec2.us-east-1.amazonaws.com\";\n+ break;\n+ case \"us-west\":\n+ case \"us-west-1\":\n+ endpoint = \"ec2.us-west-1.amazonaws.com\";\n+ break;\n+ case \"us-west-2\":\n+ endpoint = \"ec2.us-west-2.amazonaws.com\";\n+ break;\n+ case \"ap-southeast\":\n+ case \"ap-southeast-1\":\n+ endpoint = \"ec2.ap-southeast-1.amazonaws.com\";\n+ break;\n+ case \"ap-south-1\":\n+ endpoint = \"ec2.ap-south-1.amazonaws.com\";\n+ break;\n+ case \"us-gov-west\":\n+ case \"us-gov-west-1\":\n+ endpoint = \"ec2.us-gov-west-1.amazonaws.com\";\n+ break;\n+ case \"ap-southeast-2\":\n+ endpoint = \"ec2.ap-southeast-2.amazonaws.com\";\n+ break;\n+ case \"ap-northeast\":\n+ case \"ap-northeast-1\":\n+ endpoint = \"ec2.ap-northeast-1.amazonaws.com\";\n+ break;\n+ case \"ap-northeast-2\":\n+ endpoint = \"ec2.ap-northeast-2.amazonaws.com\";\n+ break;\n+ case \"eu-west\":\n+ case \"eu-west-1\":\n+ endpoint = \"ec2.eu-west-1.amazonaws.com\";\n+ break;\n+ case \"eu-central\":\n+ case \"eu-central-1\":\n+ endpoint = \"ec2.eu-central-1.amazonaws.com\";\n+ break;\n+ case \"sa-east\":\n+ case \"sa-east-1\":\n+ endpoint = \"ec2.sa-east-1.amazonaws.com\";\n+ break;\n+ case \"cn-north\":\n+ case \"cn-north-1\":\n+ endpoint = \"ec2.cn-north-1.amazonaws.com.cn\";\n+ break;\n+ default:\n+ throw new IllegalArgumentException(\"No automatic endpoint could be derived from region [\" + region + \"]\");\n }\n logger.debug(\"using ec2 region [{}], with endpoint [{}]\", region, endpoint);\n- client.setEndpoint(endpoint);\n }\n-\n- return this.client;\n+ return endpoint;\n }\n \n @Override", "filename": "plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java", "status": "modified" }, { "diff": "@@ -0,0 +1,169 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.cloud.aws;\n+\n+import com.amazonaws.ClientConfiguration;\n+import com.amazonaws.Protocol;\n+import com.amazonaws.auth.AWSCredentials;\n+import com.amazonaws.auth.AWSCredentialsProvider;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.test.ESTestCase;\n+\n+import static org.hamcrest.Matchers.containsString;\n+import static org.hamcrest.Matchers.is;\n+import static org.hamcrest.Matchers.nullValue;\n+\n+public class AwsEc2ServiceImplTests extends ESTestCase {\n+\n+ @AwaitsFix(bugUrl = \"https://github.com/elastic/elasticsearch/issues/19556\")\n+ public void testAWSCredentialsWithSystemProviders() {\n+ AWSCredentialsProvider credentialsProvider = AwsEc2ServiceImpl.buildCredentials(logger, Settings.EMPTY);\n+\n+ AWSCredentials credentials = credentialsProvider.getCredentials();\n+ assertThat(credentials.getAWSAccessKeyId(), is(\"DUMMY_ACCESS_KEY\"));\n+ assertThat(credentials.getAWSSecretKey(), is(\"DUMMY_SECRET_KEY\"));\n+ }\n+\n+ public void testAWSCredentialsWithElasticsearchAwsSettings() {\n+ Settings settings = Settings.builder()\n+ .put(AwsEc2Service.KEY_SETTING.getKey(), \"aws_key\")\n+ .put(AwsEc2Service.SECRET_SETTING.getKey(), \"aws_secret\")\n+ .build();\n+ launchAWSCredentialsWithElasticsearchSettingsTest(settings, \"aws_key\", \"aws_secret\");\n+ }\n+\n+ public void testAWSCredentialsWithElasticsearchEc2Settings() {\n+ Settings settings = Settings.builder()\n+ .put(AwsEc2Service.CLOUD_EC2.KEY_SETTING.getKey(), \"ec2_key\")\n+ .put(AwsEc2Service.CLOUD_EC2.SECRET_SETTING.getKey(), \"ec2_secret\")\n+ .build();\n+ launchAWSCredentialsWithElasticsearchSettingsTest(settings, \"ec2_key\", \"ec2_secret\");\n+ }\n+\n+ public void testAWSCredentialsWithElasticsearchAwsAndEc2Settings() {\n+ Settings settings = Settings.builder()\n+ .put(AwsEc2Service.KEY_SETTING.getKey(), \"aws_key\")\n+ .put(AwsEc2Service.SECRET_SETTING.getKey(), \"aws_secret\")\n+ .put(AwsEc2Service.CLOUD_EC2.KEY_SETTING.getKey(), \"ec2_key\")\n+ .put(AwsEc2Service.CLOUD_EC2.SECRET_SETTING.getKey(), \"ec2_secret\")\n+ .build();\n+ launchAWSCredentialsWithElasticsearchSettingsTest(settings, \"ec2_key\", \"ec2_secret\");\n+ }\n+\n+ protected void launchAWSCredentialsWithElasticsearchSettingsTest(Settings settings, String expectedKey, String expectedSecret) {\n+ AWSCredentials credentials = AwsEc2ServiceImpl.buildCredentials(logger, settings).getCredentials();\n+ assertThat(credentials.getAWSAccessKeyId(), is(expectedKey));\n+ assertThat(credentials.getAWSSecretKey(), is(expectedSecret));\n+ }\n+\n+ public void testAWSDefaultConfiguration() {\n+ launchAWSConfigurationTest(Settings.EMPTY, Protocol.HTTPS, null, -1, null, null, null);\n+ }\n+\n+ public void testAWSConfigurationWithAwsSettings() {\n+ Settings settings = Settings.builder()\n+ .put(AwsEc2Service.PROTOCOL_SETTING.getKey(), \"http\")\n+ .put(AwsEc2Service.PROXY_HOST_SETTING.getKey(), \"aws_proxy_host\")\n+ .put(AwsEc2Service.PROXY_PORT_SETTING.getKey(), 8080)\n+ .put(AwsEc2Service.PROXY_USERNAME_SETTING.getKey(), \"aws_proxy_username\")\n+ .put(AwsEc2Service.PROXY_PASSWORD_SETTING.getKey(), \"aws_proxy_password\")\n+ .put(AwsEc2Service.SIGNER_SETTING.getKey(), \"AWS3SignerType\")\n+ .build();\n+ launchAWSConfigurationTest(settings, Protocol.HTTP, \"aws_proxy_host\", 8080, \"aws_proxy_username\", \"aws_proxy_password\",\n+ \"AWS3SignerType\");\n+ }\n+\n+ public void 
testAWSConfigurationWithAwsAndEc2Settings() {\n+ Settings settings = Settings.builder()\n+ .put(AwsEc2Service.PROTOCOL_SETTING.getKey(), \"http\")\n+ .put(AwsEc2Service.PROXY_HOST_SETTING.getKey(), \"aws_proxy_host\")\n+ .put(AwsEc2Service.PROXY_PORT_SETTING.getKey(), 8080)\n+ .put(AwsEc2Service.PROXY_USERNAME_SETTING.getKey(), \"aws_proxy_username\")\n+ .put(AwsEc2Service.PROXY_PASSWORD_SETTING.getKey(), \"aws_proxy_password\")\n+ .put(AwsEc2Service.SIGNER_SETTING.getKey(), \"AWS3SignerType\")\n+ .put(AwsEc2Service.CLOUD_EC2.PROTOCOL_SETTING.getKey(), \"https\")\n+ .put(AwsEc2Service.CLOUD_EC2.PROXY_HOST_SETTING.getKey(), \"ec2_proxy_host\")\n+ .put(AwsEc2Service.CLOUD_EC2.PROXY_PORT_SETTING.getKey(), 8081)\n+ .put(AwsEc2Service.CLOUD_EC2.PROXY_USERNAME_SETTING.getKey(), \"ec2_proxy_username\")\n+ .put(AwsEc2Service.CLOUD_EC2.PROXY_PASSWORD_SETTING.getKey(), \"ec2_proxy_password\")\n+ .put(AwsEc2Service.CLOUD_EC2.SIGNER_SETTING.getKey(), \"NoOpSignerType\")\n+ .build();\n+ launchAWSConfigurationTest(settings, Protocol.HTTPS, \"ec2_proxy_host\", 8081, \"ec2_proxy_username\", \"ec2_proxy_password\",\n+ \"NoOpSignerType\");\n+ }\n+\n+ protected void launchAWSConfigurationTest(Settings settings,\n+ Protocol expectedProtocol,\n+ String expectedProxyHost,\n+ int expectedProxyPort,\n+ String expectedProxyUsername,\n+ String expectedProxyPassword,\n+ String expectedSigner) {\n+ ClientConfiguration configuration = AwsEc2ServiceImpl.buildConfiguration(logger, settings);\n+\n+ assertThat(configuration.getResponseMetadataCacheSize(), is(0));\n+ assertThat(configuration.getProtocol(), is(expectedProtocol));\n+ assertThat(configuration.getProxyHost(), is(expectedProxyHost));\n+ assertThat(configuration.getProxyPort(), is(expectedProxyPort));\n+ assertThat(configuration.getProxyUsername(), is(expectedProxyUsername));\n+ assertThat(configuration.getProxyPassword(), is(expectedProxyPassword));\n+ assertThat(configuration.getSignerOverride(), is(expectedSigner));\n+ }\n+\n+ public void testDefaultEndpoint() {\n+ String endpoint = AwsEc2ServiceImpl.findEndpoint(logger, Settings.EMPTY);\n+ assertThat(endpoint, nullValue());\n+ }\n+\n+ public void testSpecificEndpoint() {\n+ Settings settings = Settings.builder()\n+ .put(AwsEc2Service.CLOUD_EC2.ENDPOINT_SETTING.getKey(), \"ec2.endpoint\")\n+ .build();\n+ String endpoint = AwsEc2ServiceImpl.findEndpoint(logger, settings);\n+ assertThat(endpoint, is(\"ec2.endpoint\"));\n+ }\n+\n+ public void testRegionWithAwsSettings() {\n+ Settings settings = Settings.builder()\n+ .put(AwsEc2Service.REGION_SETTING.getKey(), randomFrom(\"eu-west\", \"eu-west-1\"))\n+ .build();\n+ String endpoint = AwsEc2ServiceImpl.findEndpoint(logger, settings);\n+ assertThat(endpoint, is(\"ec2.eu-west-1.amazonaws.com\"));\n+ }\n+\n+ public void testRegionWithAwsAndEc2Settings() {\n+ Settings settings = Settings.builder()\n+ .put(AwsEc2Service.REGION_SETTING.getKey(), randomFrom(\"eu-west\", \"eu-west-1\"))\n+ .put(AwsEc2Service.CLOUD_EC2.REGION_SETTING.getKey(), randomFrom(\"us-west\", \"us-west-1\"))\n+ .build();\n+ String endpoint = AwsEc2ServiceImpl.findEndpoint(logger, settings);\n+ assertThat(endpoint, is(\"ec2.us-west-1.amazonaws.com\"));\n+ }\n+\n+ public void testInvalidRegion() {\n+ Settings settings = Settings.builder()\n+ .put(AwsEc2Service.REGION_SETTING.getKey(), \"does-not-exist\")\n+ .build();\n+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {\n+ AwsEc2ServiceImpl.findEndpoint(logger, settings);\n+ });\n+ assertThat(e.getMessage(), 
containsString(\"No automatic endpoint could be derived from region\"));\n+ }\n+}", "filename": "plugins/discovery-ec2/src/test/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImplTests.java", "status": "added" } ] }
{ "body": "Index deletion requests currently use a custom acknowledgement mechanism that wait for the data nodes to actually delete the data before acknowledging the request to the client. This was initially put into place as a new index with same name could only be created if the old index was wiped as we used the index name as data folder on the data nodes. With PR #16442, we now use the index uuid as folder name which avoids collision between indices that are named the same (deleted and recreated). This allows us to get rid of the custom acknowledgment mechanism altogether and rely on the standard cluster state-based acknowledgment instead.\n\nCloses #18558\n", "comments": [ { "body": "@ywelsch and I talked about it offline, discussing the fact that this change uses two ack mechanism layered on top of each other (with two futures, two timeouts etc.) which makes things complex. We are currently evaluating whether specific index store deletion acks are needed now that we have uuids as folder names and based on that decide whether we want to invest more time here or just move to the standard cluster state based ack-ing.\n", "created_at": "2016-05-30T09:09:03Z" }, { "body": "@bleskes I've updated the PR by removing the custom ack mechanism and relying only on the standard cluster state ack.\n", "created_at": "2016-05-31T16:44:37Z" }, { "body": "LGTM. Best stats ever. Can you update the PR description/title? \n", "created_at": "2016-06-01T12:06:08Z" }, { "body": "Thanks @bleskes! I've updated title/description.\n", "created_at": "2016-06-01T13:25:56Z" } ], "number": 18602, "title": "Acknowledge index deletion requests based on standard cluster state acknowledgment" }
{ "body": "Relates to #18602\n", "number": 18681, "review_comments": [ { "body": "is this always used in an assertBusy context? wonder if we should add it here. This can be suprising...\n", "created_at": "2016-06-01T14:49:42Z" }, { "body": "At the moment it's only used in these (few) integration tests. I was debating this as well, but I think the method is also useful in unit tests.\n", "created_at": "2016-06-01T14:52:40Z" }, { "body": "we can maybe have an assertPathHasBeenCleared variant? then people will hopefully see both and choose :)\n", "created_at": "2016-06-01T14:54:19Z" }, { "body": "you mean `assertBusyPathHasBeenCleared`?\n", "created_at": "2016-06-01T14:55:53Z" }, { "body": "yeah, sorry.\n", "created_at": "2016-06-01T15:00:23Z" } ], "title": "[TEST] Fix tests that rely on assumption that data dirs are removed after index deletion acknowledged" }
{ "commits": [ { "message": "[TEST] Fix tests that rely on assumption that data dirs are removed after index deletion\nacknowledged\n\nRelates to #18602" } ], "files": [ { "diff": "@@ -586,7 +586,7 @@ public void testIndexWithShadowReplicasCleansUp() throws Exception {\n logger.info(\"--> deleting index \" + IDX);\n assertAcked(client().admin().indices().prepareDelete(IDX));\n \n- assertPathHasBeenCleared(dataPath);\n+ assertBusyPathHasBeenCleared(dataPath);\n //norelease\n //TODO: uncomment the test below when https://github.com/elastic/elasticsearch/issues/17695 is resolved.\n //assertIndicesDirsDeleted(nodes);\n@@ -647,7 +647,7 @@ public void run() {\n \n assertAcked(client().admin().indices().prepareDelete(IDX));\n \n- assertPathHasBeenCleared(dataPath);\n+ assertBusyPathHasBeenCleared(dataPath);\n //norelease\n //TODO: uncomment the test below when https://github.com/elastic/elasticsearch/issues/17695 is resolved.\n //assertIndicesDirsDeleted(nodes);\n@@ -839,7 +839,7 @@ public void testDeletingClosedIndexRemovesFiles() throws Exception {\n logger.info(\"--> deleting closed index\");\n client().admin().indices().prepareDelete(IDX).get();\n \n- assertPathHasBeenCleared(dataPath);\n+ assertBusyPathHasBeenCleared(dataPath);\n assertIndicesDirsDeleted(nodes);\n }\n ", "filename": "core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java", "status": "modified" }, { "diff": "@@ -556,7 +556,7 @@ public void testIndexDirIsDeletedWhenShardRemoved() throws Exception {\n SearchResponse response = client().prepareSearch(\"test\").get();\n assertHitCount(response, 1L);\n client().admin().indices().prepareDelete(\"test\").get();\n- assertPathHasBeenCleared(idxPath);\n+ assertBusyPathHasBeenCleared(idxPath);\n }\n \n public void testExpectedShardSizeIsPresent() throws InterruptedException {\n@@ -639,8 +639,8 @@ public void testIndexCanChangeCustomDataPath() throws Exception {\n assertThat(\"found the hit\", resp.getHits().getTotalHits(), equalTo(1L));\n \n assertAcked(client().admin().indices().prepareDelete(INDEX));\n- assertPathHasBeenCleared(startDir.toAbsolutePath().toString());\n- assertPathHasBeenCleared(endDir.toAbsolutePath().toString());\n+ assertBusyPathHasBeenCleared(startDir.toAbsolutePath());\n+ assertBusyPathHasBeenCleared(endDir.toAbsolutePath());\n }\n \n public void testShardStats() throws IOException {", "filename": "core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java", "status": "modified" }, { "diff": "@@ -70,6 +70,7 @@\n import org.junit.rules.RuleChain;\n \n import java.io.IOException;\n+import java.io.UncheckedIOException;\n import java.nio.file.DirectoryStream;\n import java.nio.file.Files;\n import java.nio.file.Path;\n@@ -672,16 +673,16 @@ public static boolean assertionsEnabled() {\n }\n \n /**\n- * Asserts that there are no files in the specified path\n+ * Asserts busily that there are no files in the specified path\n */\n- public void assertPathHasBeenCleared(String path) throws Exception {\n- assertPathHasBeenCleared(PathUtils.get(path));\n+ public void assertBusyPathHasBeenCleared(Path path) throws Exception {\n+ assertBusy(() -> assertPathHasBeenCleared(path));\n }\n \n /**\n * Asserts that there are no files in the specified path\n */\n- public void assertPathHasBeenCleared(Path path) throws Exception {\n+ public void assertPathHasBeenCleared(Path path) {\n logger.info(\"--> checking that [{}] has been cleared\", path);\n int count = 0;\n StringBuilder sb = new StringBuilder();\n@@ -702,6 +703,8 @@ public void 
assertPathHasBeenCleared(Path path) throws Exception {\n sb.append(\"\\n\");\n }\n }\n+ } catch (IOException e) {\n+ throw new UncheckedIOException(e);\n }\n }\n sb.append(\"]\");", "filename": "test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java", "status": "modified" } ] }
{ "body": "ThreadPool.java creates a background thread to updated an estimated clock time at an interval that is controlled by a configuration setting called `estimated_time_interval`.\nDue to the way the component's settings are parsed this must be declared in configuration with the name `threadpool.estimated_time_interval` - BUT, earlier in the constructor of this class the call here throws an error if this name is defined in the configuration:\n\n```\nMap<String, Settings> groupSettings = settings.getGroups(THREADPOOL_GROUP);\n```\n\nThe reason is that the getGroups call checks that all settings prefixed with `threadpool.` must be \"grouped\" i.e. qualified with the name of a threadpool e.g. `threadpool.search.size`. \nConsequently we cannot possibly change the default setting of this interval and the need to do so has emerged as part of controlling search timeouts (see \"Setting timer accuracy\" section of https://github.com/elasticsearch/elasticsearch/issues/9156). A [PR](https://github.com/elasticsearch/elasticsearch/pull/9168) for this timeout issue includes a fix that changes the setting name to `threadpool.search.estimated_time_interval`. \n", "comments": [ { "body": "@markharwood is this issue still relevant?\n", "created_at": "2015-12-03T19:48:05Z" }, { "body": "This will be addressed by #18674.\n", "created_at": "2016-06-06T17:14:43Z" } ], "number": 9216, "title": "ThreadPool.java looks for a config setting it itself prohibits" }
{ "body": "This commit refactors the handling of thread pool settings so that the\nindividual settings can be registered rather than registering the top\nlevel group. With this refactoring, individual plugins must now register\ntheir own settings for custom thread pools that they need, but a\ndedicated API is provided for this in the thread pool module. This\ncommit also renames the prefix on the thread pool settings from\n\"threadpool\" to \"thread_pool\". This enables a hard break on the settings\nso that:\n- some of the settings can be given more sensible names (e.g., the max\n number of threads in a scaling thread pool is now named \"max\" instead\n of \"size\")\n- change the soft limit on the number of threads in the bulk and\n indexing thread pools to a hard limit\n- the settings names for custom plugins for thread pools can be\n prefixed (e.g., \"xpack.watcher.thread_pool.size\")\n- thread pool settings are now node-level settings\n\nRelates #18613, closes #9216\n", "number": 18674, "review_comments": [ { "body": "`Setting.Property.Dynamic,` can go away\n", "created_at": "2016-06-02T15:52:31Z" }, { "body": "newlines FTW?\n", "created_at": "2016-06-02T15:54:33Z" }, { "body": "do we really need to extend this class? I wanna get rid of it? We should set things up in a ctor and impl. Closeable IMO\n", "created_at": "2016-06-02T15:55:58Z" }, { "body": "You get a newline, and you get a newline, and you get a newline, everyone gets a newline! :smile:\n\nRemoved in 2f292470dab843c946642dc1db94bb5e8343e4f7.\n", "created_at": "2016-06-02T15:57:30Z" }, { "body": "these look awesome. Can we get some java docs?\n", "created_at": "2016-06-02T15:59:19Z" }, { "body": "man can we find a better name for `core` I really struggle with what it means?\n", "created_at": "2016-06-02T15:59:38Z" }, { "body": "that said, I don't have a better name\n", "created_at": "2016-06-02T15:59:49Z" }, { "body": "Core is a pretty common name, it comes directly from the Java `ThreadPoolExecutor` on which our executors are based. \n", "created_at": "2016-06-02T16:00:41Z" }, { "body": "That was a mistake, I removed them from the scaling executor settings but forget them here. I'm glad that you noticed. Removed in 76026f17dba3c88b721381323093aaa07f34b9a8.\n", "created_at": "2016-06-02T16:01:18Z" }, { "body": "maybe add an overload to `Setting#intSetting`?\n", "created_at": "2016-06-02T16:05:31Z" }, { "body": "can we get javadocs?\n", "created_at": "2016-06-02T16:06:15Z" }, { "body": "It's tricky. I want to construct the executors map once, making it effectively final (although it can not be actually final right now since we do not build it in the constructor). Given this desire, we can not build it in the constructor because we need to wait until all plugins have had a chance to register their custom thread pool settings. But the thread pool instance is needed earlier than this happens because it's needed for the `MonitorService` which is in turn needed for the `NodeModule` which must be built before we start processing plugins. 
The timeline is thus:\n\n constructor runs < inject into MonitorService < inject into NodeModule < plugins process modules < start thread pool to create executors\n\nI looked at options around this but I didn't like any of them, they were all super hacky and involved registering suppliers that used the injector.\n", "created_at": "2016-06-02T16:06:15Z" }, { "body": "I'd prefer `getSettings` \n", "created_at": "2016-06-02T16:06:34Z" }, { "body": "to me that method means the settings get registered and I ignore the return type? :)\n", "created_at": "2016-06-02T16:06:55Z" }, { "body": "is this the `build()` method? it should be named that way?\n", "created_at": "2016-06-02T16:07:17Z" }, { "body": "another option would be to just fail if it's too high?\n", "created_at": "2016-06-02T16:08:09Z" }, { "body": "only used once so maybe a local var is easier ie. inline\n", "created_at": "2016-06-02T16:08:45Z" }, { "body": "maybe these settings classes should just accept a `Settings` instance in their ctors? or have a second one?\n", "created_at": "2016-06-02T16:10:17Z" }, { "body": "ok leave some docs then ++\n", "created_at": "2016-06-02T16:11:47Z" }, { "body": "can we just get rid of custom threadpools instead?\n", "created_at": "2016-06-02T16:14:39Z" }, { "body": "I pushed 070e260eadafc0282ecdff9f474b1ad4eaa47c6a.\n", "created_at": "2016-06-02T16:31:29Z" }, { "body": "I pushed 070e260eadafc0282ecdff9f474b1ad4eaa47c6a.\n", "created_at": "2016-06-02T16:31:42Z" }, { "body": "I pushed 070e260eadafc0282ecdff9f474b1ad4eaa47c6a.\n", "created_at": "2016-06-02T16:32:16Z" }, { "body": "I think bounding based on the number of cores makes sense (I think we might even want to consider one plus the number of cores to give an extra schedulable thread in case another thread is blocked waiting on I/O or a lock). I think going above this does not make any sense.\n", "created_at": "2016-06-02T16:35:04Z" }, { "body": "I pushed b2fb135eb31c05a75067962cb45ac9fea84670e4.\n", "created_at": "2016-06-02T16:46:57Z" }, { "body": "I pushed 070e260eadafc0282ecdff9f474b1ad4eaa47c6a.\n", "created_at": "2016-06-02T16:47:20Z" }, { "body": "This means these settings classes need to be inner classes to have access to the settings objects and I'm not sure if it's really worth it?\n", "created_at": "2016-06-02T16:48:43Z" }, { "body": "I pushed 070e260eadafc0282ecdff9f474b1ad4eaa47c6a.\n", "created_at": "2016-06-02T16:49:27Z" }, { "body": "I'm not sure since this is the only use and I don't really see broader use?\n", "created_at": "2016-06-02T16:50:06Z" }, { "body": "I pushed a1e3cb30c3d5b4f5c25eeca8acdc5e188b4175b2.\n", "created_at": "2016-06-02T16:58:47Z" } ], "title": "Register thread pool settings" }
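The review above converges on capping the bulk and indexing pools at one more thread than the (bounded) processor count, so one extra thread remains schedulable if another is blocked on I/O or a lock. A plain-Java sketch of that sizing rule follows; it uses the raw processor count and a standard ThreadPoolExecutor, and is not the actual EsExecutors/ThreadPool code in the diff below.

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Illustrative only: a fixed executor hard-capped at processors + 1 with a
// bounded queue, roughly matching the sizing rule discussed for the bulk and
// indexing pools.
final class FixedExecutorSketch {
    static ThreadPoolExecutor build(int requestedSize, int queueSize) {
        int processors = Runtime.getRuntime().availableProcessors();
        // hard cap: one extra schedulable thread beyond the processor count
        int size = Math.min(requestedSize, processors + 1);
        return new ThreadPoolExecutor(
            size, size,                 // fixed pool: core == max
            0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<>(queueSize));
    }
}
```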
{ "commits": [ { "message": "Register thread pool settings\n\nThis commit refactors the handling of thread pool settings so that the\nindividual settings can be registered rather than registering the top\nlevel group. With this refactoring, individual plugins must now register\ntheir own settings for custom thread pools that they need, but a\ndedicated API is provided for this in the thread pool module. This\ncommit also renames the prefix on the thread pool settings from\n\"threadpool\" to \"thread_pool\". This enables a hard break on the settings\nso that:\n - some of the settings can be given more sensible names (e.g., the max\n number of threads in a scaling thread pool is now named \"max\" instead\n of \"size\")\n - change the soft limit on the number of threads in the bulk and\n indexing thread pools to a hard limit\n - the settings names for custom plugins for thread pools can be\n prefixed (e.g., \"xpack.watcher.thread_pool.size\")" }, { "message": "Merge branch 'master' into thread-pool-refactor\n\n* master:\n Update reindex.asciidoc (#18687)\n Add more logging to reindex rethrottle\n [TEST] Increase timeout to wait on folder deletion in IndexWithShadowReplicasIT\n [TEST] Fix tests that rely on assumption that data dirs are removed after index deletion (#18681)\n Acknowledge index deletion requests based on standard cluster state acknowledgment (#18602)\n Register \"cloud.node.auto_attributes\" setting (#18678)\n Fix StoreRecoveryTests after 6.0.1 upgrade.\n Upgrade to Lucene 6.0.1.\n ingest: added `ignore_failure` option to all processors\n Throw IllegalStateException when handshake fails due to version or cluster mismatch (#18676)\n AggregatorBuilder and PipelineAggregatorBuilder do not need generics. #18368\n Move PageCacheRecycler into BigArrays (#18666)\n Index Template: parse and validate mappings in template when template puts\n [TEST] Set BWC version to 5.0.0-SNAP since this is it's min compat version\n Make cluster health classes immutable and have them implement Writeable instead of Streamable\n Fix javadoc that stated a throws clause that didn't exist.\n Clarify the semantics of the BlobContainer interface\n Build: Rework eclipse settings copy so it does not get automatically cleaned" }, { "message": "Remove dynamic thread pool settings\n\nThis commit removes the ability for thread pool settings to be adjusted\ndynamically. With this commit, thread pool settings are now node-level\nsettings and can not be modified via the cluster settings API." }, { "message": "Remove unused imports\n\nThis commit removes some unused imports that are no longer needed after\nrefactoring thread pool settings." }, { "message": "Modify thread pool settings prefixes\n\nThis commit modifies settings prefixes so that the name is not\nduplicated (e.g., \"xpack.watcher.thread_pool.watcher\" under the previous\ncode)." }, { "message": "Remove unused retired executors field\n\nThis commit removes the now unnecessary retired executors field from\nThreadPool after retiring executors are no longer a thing due to remove\ndynamic thread pool settings." }, { "message": "Remove volatile keyword from executors field\n\nThis commit removes the volatile keyword from the executors field; this\nis no longer needed as the executors can no longer be updated." }, { "message": "Remove newlines in o.e.t.ScalingExecutorBuilder\n\nThis commit removes some unnecessary trailing newlines from\no/e/t/ScalingExecutorBuilder.java." 
}, { "message": "Remove dynamic from fixed executor settings\n\nThis commit removes the dynamic properties from fixed executor settings." }, { "message": "Cleanup executor builders\n\nThis commit cleans up some simple issues for executor builders:\n - marks the implementations as final\n - restricts the visibilty of some methods\n - adds Javadocs\n - changes a couple of method names for clarity" }, { "message": "Inline queue size method in fixed executor builder\n\nThis commit inlines the queue size utility method in the fixed executor\nbuilder as it was only used in one place." }, { "message": "Add an extra schedulable bulk and indexing thread\n\nThis commit increases the hard size limit on bulk and indexing threads\nto one plus the bounded number of processors. This allows for an extra\nschedulable thread in case one of the others from the pool is blocked on\nI/O or a lock." }, { "message": "Refactor custom thread pool registration\n\nThis commit refactors custom thread pool registration. Rather than\nprocessing the thread pool module in an onModule method on plugins, we\nsimply ask the plugins for their custom thread pool registration. This\nsimplifies custom thread pool registration and ultimately thread pool\nconstruction." }, { "message": "Revert import order change in AbstractClient\n\nThis commit reverts an inadvertent import order chance in\nAbstractClient.java." }, { "message": "Merge branch 'master' into thread-pool-refactor\n\n* master: (22 commits)\n Fix recovery throttling to properly handle relocating non-primary shards (#18701)\n Fix merge stats rendering in RestIndicesAction (#18720)\n [TEST] mute RandomAllocationDeciderTests.testRandomDecisions\n Reworked docs for index-shrink API (#18705)\n Improve painless compile-time exceptions\n Adds UUIDs to snapshots\n Add test rethrottle test case for delete-by-query\n Do not start scheduled pings until transport start\n Adressing review comments\n Only filter intial recovery (post API) when shrinking an index (#18661)\n Add tests to check that toQuery() doesn't return null\n Removing handling of null lucene query where we catch this at parse time\n Handle empty query bodies at parse time and remove EmptyQueryBuilder\n Mute failing assertions in IndexWithShadowReplicasIT until fix\n Remove allow running as root\n Add upgrade-not-supported warning to alpha release notes\n remove unrecognized javadoc tag from matrix aggregation module\n set ValuesSourceConfig fields as private\n Adding MultiValuesSource support classes and documentation to matrix stats agg module\n New Matrix Stats Aggregation module\n ..." }, { "message": "Revert method visibility to package-private\n\nThis commit reverts the method AbstractScopedSettings#addSettingsUpdater\nto being package-private. This method was previously made public for an\nearlier refactoring but that refactoring has been undone after making\nthread pool settings non-dynamic." }, { "message": "Add prefix to method names in ExecutorBuilder\n\nThis commit renames two methods in ExecutorBuilder to add \"get\" prefixes\nto their method names." }, { "message": "Add Javadocs for plugin custom thread pools\n\nThis commit adds Javadocs for the extension point for plugins to\nregister custom thread pools." }, { "message": "Register estimated time interval setting\n\nThis commit registers the estimated time interval setting, the interval\nat which the estimated time thread updates the estimated time." 
} ], "files": [ { "diff": "@@ -190,7 +190,6 @@ public void apply(Settings value, Settings current, Settings previous) {\n RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING,\n RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING,\n RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING,\n- ThreadPool.THREADPOOL_GROUP_SETTING,\n ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING,\n ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING,\n ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING,\n@@ -419,6 +418,7 @@ public void apply(Settings value, Settings current, Settings previous) {\n ResourceWatcherService.RELOAD_INTERVAL_HIGH,\n ResourceWatcherService.RELOAD_INTERVAL_MEDIUM,\n ResourceWatcherService.RELOAD_INTERVAL_LOW,\n- SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING\n+ SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING,\n+ ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING\n )));\n }", "filename": "core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java", "status": "modified" }, { "diff": "@@ -83,13 +83,16 @@ public static String threadName(Settings settings, String ... names) {\n }\n \n public static String threadName(Settings settings, String namePrefix) {\n- String name = settings.get(\"node.name\");\n- if (name == null) {\n- name = \"elasticsearch\";\n+ String nodeName = settings.get(\"node.name\");\n+ if (nodeName == null) {\n+ return threadName(\"\", namePrefix);\n } else {\n- name = \"elasticsearch[\" + name + \"]\";\n+ return threadName(nodeName, namePrefix);\n }\n- return name + \"[\" + namePrefix + \"]\";\n+ }\n+\n+ public static String threadName(final String nodeName, final String namePrefix) {\n+ return \"elasticsearch\" + (nodeName.isEmpty() ? \"\" : \"[\") + nodeName + (nodeName.isEmpty() ? \"\" : \"]\") + \"[\" + namePrefix + \"]\";\n }\n \n public static ThreadFactory daemonThreadFactory(Settings settings, String namePrefix) {", "filename": "core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java", "status": "modified" }, { "diff": "@@ -98,6 +98,7 @@\n import org.elasticsearch.snapshots.SnapshotShardsService;\n import org.elasticsearch.snapshots.SnapshotsService;\n import org.elasticsearch.tasks.TaskResultsService;\n+import org.elasticsearch.threadpool.ExecutorBuilder;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.threadpool.ThreadPoolModule;\n import org.elasticsearch.transport.TransportService;\n@@ -210,18 +211,20 @@ protected Node(Environment tmpEnv, Version version, Collection<Class<? 
extends P\n throw new IllegalStateException(\"Failed to created node environment\", ex);\n }\n final NetworkService networkService = new NetworkService(settings);\n- final ThreadPool threadPool = new ThreadPool(settings);\n+ final List<ExecutorBuilder<?>> executorBuilders = pluginsService.getExecutorBuilders(settings);\n+ final ThreadPool threadPool = new ThreadPool(settings, executorBuilders.toArray(new ExecutorBuilder[0]));\n+\n NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();\n boolean success = false;\n try {\n- final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool);\n ModulesBuilder modules = new ModulesBuilder();\n modules.add(new Version.Module(version));\n modules.add(new CircuitBreakerModule(settings));\n // plugin modules must be added here, before others or we can get crazy injection errors...\n for (Module pluginModule : pluginsService.nodeModules()) {\n modules.add(pluginModule);\n }\n+ final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool);\n modules.add(new PluginsModule(pluginsService));\n SettingsModule settingsModule = new SettingsModule(this.settings);\n modules.add(settingsModule);\n@@ -232,7 +235,8 @@ protected Node(Environment tmpEnv, Version version, Collection<Class<? extends P\n modules.add(scriptModule);\n modules.add(new NodeEnvironmentModule(nodeEnvironment));\n modules.add(new ClusterNameModule(this.settings));\n- modules.add(new ThreadPoolModule(threadPool));\n+ final ThreadPoolModule threadPoolModule = new ThreadPoolModule(threadPool);\n+ modules.add(threadPoolModule);\n modules.add(new DiscoveryModule(this.settings));\n modules.add(new ClusterModule(this.settings));\n modules.add(new IndicesModule());\n@@ -246,11 +250,14 @@ protected Node(Environment tmpEnv, Version version, Collection<Class<? 
extends P\n modules.add(new AnalysisModule(environment));\n \n pluginsService.processModules(modules);\n+\n scriptModule.prepareSettings(settingsModule);\n+\n+ threadPoolModule.prepareSettings(settingsModule);\n+\n injector = modules.createInjector();\n \n client = injector.getInstance(Client.class);\n- threadPool.setClusterSettings(injector.getInstance(ClusterSettings.class));\n success = true;\n } catch (IOException ex) {\n throw new ElasticsearchException(\"failed to bind service\", ex);", "filename": "core/src/main/java/org/elasticsearch/node/Node.java", "status": "modified" }, { "diff": "@@ -23,9 +23,12 @@\n import org.elasticsearch.common.inject.Module;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.index.IndexModule;\n+import org.elasticsearch.threadpool.ExecutorBuilder;\n+import org.elasticsearch.threadpool.ThreadPool;\n \n import java.util.Collection;\n import java.util.Collections;\n+import java.util.List;\n \n /**\n * An extension point allowing to plug in custom functionality.\n@@ -80,4 +83,15 @@ public void onIndexModule(IndexModule indexModule) {}\n */\n @Deprecated\n public final void onModule(IndexModule indexModule) {}\n+\n+ /**\n+ * Provides the list of this plugin's custom thread pools, empty if\n+ * none.\n+ *\n+ * @param settings the current settings\n+ * @return executors builders for this plugin's custom thread pools\n+ */\n+ public List<ExecutorBuilder<?>> getExecutorBuilders(Settings settings) {\n+ return Collections.emptyList();\n+ }\n }", "filename": "core/src/main/java/org/elasticsearch/plugins/Plugin.java", "status": "modified" }, { "diff": "@@ -40,6 +40,7 @@\n import org.elasticsearch.common.settings.Setting.Property;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.index.IndexModule;\n+import org.elasticsearch.threadpool.ExecutorBuilder;\n \n import java.io.IOException;\n import java.lang.reflect.InvocationTargetException;\n@@ -261,6 +262,14 @@ public Collection<Module> nodeModules() {\n return modules;\n }\n \n+ public List<ExecutorBuilder<?>> getExecutorBuilders(Settings settings) {\n+ final ArrayList<ExecutorBuilder<?>> builders = new ArrayList<>();\n+ for (final Tuple<PluginInfo, Plugin> plugin : plugins) {\n+ builders.addAll(plugin.v2().getExecutorBuilders(settings));\n+ }\n+ return builders;\n+ }\n+\n public Collection<Class<? extends LifecycleComponent>> nodeServices() {\n List<Class<? extends LifecycleComponent>> services = new ArrayList<>();\n for (Tuple<PluginInfo, Plugin> plugin : plugins) {", "filename": "core/src/main/java/org/elasticsearch/plugins/PluginsService.java", "status": "modified" }, { "diff": "@@ -0,0 +1,91 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.threadpool;\n+\n+import org.elasticsearch.common.settings.Setting;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.util.concurrent.ThreadContext;\n+\n+import java.util.List;\n+\n+/**\n+ * Base class for executor builders.\n+ *\n+ * @param <U> the underlying type of the executor settings\n+ */\n+public abstract class ExecutorBuilder<U extends ExecutorBuilder.ExecutorSettings> {\n+\n+ private final String name;\n+\n+ public ExecutorBuilder(String name) {\n+ this.name = name;\n+ }\n+\n+ protected String name() {\n+ return name;\n+ }\n+\n+ protected static String settingsKey(final String prefix, final String key) {\n+ return String.join(\".\", prefix, key);\n+ }\n+\n+ /**\n+ * The list of settings this builder will register.\n+ *\n+ * @return the list of registered settings\n+ */\n+ abstract List<Setting<?>> getRegisteredSettings();\n+\n+ /**\n+ * Return an executor settings object from the node-level settings.\n+ *\n+ * @param settings the node-level settings\n+ * @return the executor settings object\n+ */\n+ abstract U getSettings(Settings settings);\n+\n+ /**\n+ * Builds the executor with the specified executor settings.\n+ *\n+ * @param settings the executor settings\n+ * @param threadContext the current thread context\n+ * @return a new executor built from the specified executor settings\n+ */\n+ abstract ThreadPool.ExecutorHolder build(U settings, ThreadContext threadContext);\n+\n+ /**\n+ * Format the thread pool info object for this executor.\n+ *\n+ * @param info the thread pool info object to format\n+ * @return a formatted thread pool info (useful for logging)\n+ */\n+ abstract String formatInfo(ThreadPool.Info info);\n+\n+ static abstract class ExecutorSettings {\n+\n+ protected final String nodeName;\n+\n+ public ExecutorSettings(String nodeName) {\n+ this.nodeName = nodeName;\n+ }\n+\n+ }\n+\n+}", "filename": "core/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java", "status": "added" }, { "diff": "@@ -0,0 +1,135 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.threadpool;\n+\n+import org.elasticsearch.common.settings.Setting;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.unit.SizeValue;\n+import org.elasticsearch.common.util.concurrent.EsExecutors;\n+import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor;\n+import org.elasticsearch.common.util.concurrent.ThreadContext;\n+import org.elasticsearch.node.Node;\n+\n+import java.util.Arrays;\n+import java.util.List;\n+import java.util.Locale;\n+import java.util.concurrent.Executor;\n+import java.util.concurrent.ThreadFactory;\n+\n+/**\n+ * A builder for fixed executors.\n+ */\n+public final class FixedExecutorBuilder extends ExecutorBuilder<FixedExecutorBuilder.FixedExecutorSettings> {\n+\n+ private final Setting<Integer> sizeSetting;\n+ private final Setting<Integer> queueSizeSetting;\n+\n+ /**\n+ * Construct a fixed executor builder; the settings will have the\n+ * key prefix \"thread_pool.\" followed by the executor name.\n+ *\n+ * @param settings the node-level settings\n+ * @param name the name of the executor\n+ * @param size the fixed number of threads\n+ * @param queueSize the size of the backing queue, -1 for unbounded\n+ */\n+ FixedExecutorBuilder(final Settings settings, final String name, final int size, final int queueSize) {\n+ this(settings, name, size, queueSize, \"thread_pool.\" + name);\n+ }\n+\n+ /**\n+ * Construct a fixed executor builder.\n+ *\n+ * @param settings the node-level settings\n+ * @param name the name of the executor\n+ * @param size the fixed number of threads\n+ * @param queueSize the size of the backing queue, -1 for unbounded\n+ * @param prefix the prefix for the settings keys\n+ */\n+ public FixedExecutorBuilder(final Settings settings, final String name, final int size, final int queueSize, final String prefix) {\n+ super(name);\n+ final String sizeKey = settingsKey(prefix, \"size\");\n+ this.sizeSetting =\n+ new Setting<>(\n+ sizeKey,\n+ s -> Integer.toString(size),\n+ s -> Setting.parseInt(s, 1, applyHardSizeLimit(settings, name), sizeKey),\n+ Setting.Property.NodeScope);\n+ final String queueSizeKey = settingsKey(prefix, \"queue_size\");\n+ this.queueSizeSetting =\n+ Setting.intSetting(queueSizeKey, queueSize, Setting.Property.NodeScope);\n+ }\n+\n+ private int applyHardSizeLimit(final Settings settings, final String name) {\n+ if (name.equals(ThreadPool.Names.BULK) || name.equals(ThreadPool.Names.INDEX)) {\n+ return 1 + EsExecutors.boundedNumberOfProcessors(settings);\n+ } else {\n+ return Integer.MAX_VALUE;\n+ }\n+ }\n+\n+ @Override\n+ List<Setting<?>> getRegisteredSettings() {\n+ return Arrays.asList(sizeSetting, queueSizeSetting);\n+ }\n+\n+ @Override\n+ FixedExecutorSettings getSettings(Settings settings) {\n+ final String nodeName = Node.NODE_NAME_SETTING.get(settings);\n+ final int size = sizeSetting.get(settings);\n+ final int queueSize = queueSizeSetting.get(settings);\n+ return new FixedExecutorSettings(nodeName, size, queueSize);\n+ }\n+\n+ @Override\n+ ThreadPool.ExecutorHolder build(final FixedExecutorSettings settings, final ThreadContext threadContext) {\n+ int size = settings.size;\n+ int queueSize = settings.queueSize;\n+ final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name()));\n+ Executor executor = EsExecutors.newFixed(name(), size, queueSize, threadFactory, threadContext);\n+ final 
ThreadPool.Info info =\n+ new ThreadPool.Info(name(), ThreadPool.ThreadPoolType.FIXED, size, size, null, queueSize < 0 ? null : new SizeValue(queueSize));\n+ return new ThreadPool.ExecutorHolder(executor, info);\n+ }\n+\n+ @Override\n+ String formatInfo(ThreadPool.Info info) {\n+ return String.format(\n+ Locale.ROOT,\n+ \"name [%s], size [%d], queue size [%s]\",\n+ info.getName(),\n+ info.getMax(),\n+ info.getQueueSize() == null ? \"unbounded\" : info.getQueueSize());\n+ }\n+\n+ static class FixedExecutorSettings extends ExecutorBuilder.ExecutorSettings {\n+\n+ private final int size;\n+ private final int queueSize;\n+\n+ public FixedExecutorSettings(final String nodeName, final int size, final int queueSize) {\n+ super(nodeName);\n+ this.size = size;\n+ this.queueSize = queueSize;\n+ }\n+\n+ }\n+\n+}", "filename": "core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java", "status": "added" }, { "diff": "@@ -0,0 +1,129 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.threadpool;\n+\n+import org.elasticsearch.common.settings.Setting;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.unit.TimeValue;\n+import org.elasticsearch.common.util.concurrent.EsExecutors;\n+import org.elasticsearch.common.util.concurrent.ThreadContext;\n+import org.elasticsearch.node.Node;\n+\n+import java.util.Arrays;\n+import java.util.List;\n+import java.util.Locale;\n+import java.util.concurrent.Executor;\n+import java.util.concurrent.ThreadFactory;\n+import java.util.concurrent.TimeUnit;\n+\n+/**\n+ * A builder for scaling executors.\n+ */\n+public final class ScalingExecutorBuilder extends ExecutorBuilder<ScalingExecutorBuilder.ScalingExecutorSettings> {\n+\n+ private final Setting<Integer> coreSetting;\n+ private final Setting<Integer> maxSetting;\n+ private final Setting<TimeValue> keepAliveSetting;\n+\n+ /**\n+ * Construct a scaling executor builder; the settings will have the\n+ * key prefix \"thread_pool.\" followed by the executor name.\n+ *\n+ * @param name the name of the executor\n+ * @param core the minimum number of threads in the pool\n+ * @param max the maximum number of threads in the pool\n+ * @param keepAlive the time that spare threads above {@code core}\n+ * threads will be kept alive\n+ */\n+ public ScalingExecutorBuilder(final String name, final int core, final int max, final TimeValue keepAlive) {\n+ this(name, core, max, keepAlive, \"thread_pool.\" + name);\n+ }\n+\n+ /**\n+ * Construct a scaling executor builder; the settings will have the\n+ * specified key prefix.\n+ *\n+ * @param name the name of the executor\n+ * @param core the minimum number of threads in the pool\n+ * @param max the maximum number of threads in the pool\n+ * @param 
keepAlive the time that spare threads above {@code core}\n+ * threads will be kept alive\n+ * @param prefix the prefix for the settings keys\n+ */\n+ public ScalingExecutorBuilder(final String name, final int core, final int max, final TimeValue keepAlive, final String prefix) {\n+ super(name);\n+ this.coreSetting =\n+ Setting.intSetting(settingsKey(prefix, \"core\"), core, Setting.Property.NodeScope);\n+ this.maxSetting = Setting.intSetting(settingsKey(prefix, \"max\"), max, Setting.Property.NodeScope);\n+ this.keepAliveSetting =\n+ Setting.timeSetting(settingsKey(prefix, \"keep_alive\"), keepAlive, Setting.Property.NodeScope);\n+ }\n+\n+ @Override\n+ List<Setting<?>> getRegisteredSettings() {\n+ return Arrays.asList(coreSetting, maxSetting, keepAliveSetting);\n+ }\n+\n+ @Override\n+ ScalingExecutorSettings getSettings(Settings settings) {\n+ final String nodeName = Node.NODE_NAME_SETTING.get(settings);\n+ final int coreThreads = coreSetting.get(settings);\n+ final int maxThreads = maxSetting.get(settings);\n+ final TimeValue keepAlive = keepAliveSetting.get(settings);\n+ return new ScalingExecutorSettings(nodeName, coreThreads, maxThreads, keepAlive);\n+ }\n+\n+ ThreadPool.ExecutorHolder build(final ScalingExecutorSettings settings, final ThreadContext threadContext) {\n+ TimeValue keepAlive = settings.keepAlive;\n+ int core = settings.core;\n+ int max = settings.max;\n+ final ThreadPool.Info info = new ThreadPool.Info(name(), ThreadPool.ThreadPoolType.SCALING, core, max, keepAlive, null);\n+ final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name()));\n+ final Executor executor =\n+ EsExecutors.newScaling(name(), core, max, keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory, threadContext);\n+ return new ThreadPool.ExecutorHolder(executor, info);\n+ }\n+\n+ @Override\n+ String formatInfo(ThreadPool.Info info) {\n+ return String.format(\n+ Locale.ROOT,\n+ \"name [%s], core [%d], max [%d], keep alive [%s]\",\n+ info.getName(),\n+ info.getMin(),\n+ info.getMax(),\n+ info.getKeepAlive());\n+ }\n+\n+ static class ScalingExecutorSettings extends ExecutorBuilder.ExecutorSettings {\n+\n+ private final int core;\n+ private final int max;\n+ private final TimeValue keepAlive;\n+\n+ public ScalingExecutorSettings(final String nodeName, final int core, final int max, final TimeValue keepAlive) {\n+ super(nodeName);\n+ this.core = core;\n+ this.max = max;\n+ this.keepAlive = keepAlive;\n+ }\n+ }\n+\n+}", "filename": "core/src/main/java/org/elasticsearch/threadpool/ScalingExecutorBuilder.java", "status": "added" }, { "diff": "@@ -26,11 +26,8 @@\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n import org.elasticsearch.common.io.stream.Streamable;\n-import org.elasticsearch.common.settings.ClusterSettings;\n import org.elasticsearch.common.settings.Setting;\n-import org.elasticsearch.common.settings.Setting.Property;\n import org.elasticsearch.common.settings.Settings;\n-import org.elasticsearch.common.settings.SettingsException;\n import org.elasticsearch.common.unit.SizeValue;\n import org.elasticsearch.common.unit.TimeValue;\n import org.elasticsearch.common.util.concurrent.EsAbortPolicy;\n@@ -45,31 +42,22 @@\n import java.io.Closeable;\n import java.io.IOException;\n import java.util.ArrayList;\n+import java.util.Collection;\n import java.util.Collections;\n import java.util.HashMap;\n import java.util.List;\n import java.util.Map;\n-import java.util.Objects;\n-import 
java.util.Queue;\n-import java.util.concurrent.ConcurrentLinkedQueue;\n import java.util.concurrent.Executor;\n import java.util.concurrent.ExecutorService;\n import java.util.concurrent.RejectedExecutionHandler;\n import java.util.concurrent.ScheduledExecutorService;\n import java.util.concurrent.ScheduledFuture;\n import java.util.concurrent.ScheduledThreadPoolExecutor;\n-import java.util.concurrent.ThreadFactory;\n import java.util.concurrent.ThreadPoolExecutor;\n import java.util.concurrent.TimeUnit;\n-import java.util.concurrent.atomic.AtomicBoolean;\n \n import static java.util.Collections.unmodifiableMap;\n-import static org.elasticsearch.common.unit.SizeValue.parseSizeValue;\n-import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;\n \n-/**\n- *\n- */\n public class ThreadPool extends AbstractComponent implements Closeable {\n \n public static class Names {\n@@ -146,164 +134,85 @@ public static ThreadPoolType fromType(String type) {\n THREAD_POOL_TYPES = Collections.unmodifiableMap(map);\n }\n \n- private static void add(Map<String, Settings> executorSettings, ExecutorSettingsBuilder builder) {\n- Settings settings = builder.build();\n- String name = settings.get(\"name\");\n- executorSettings.put(name, settings);\n- }\n-\n- private static abstract class ExecutorSettingsBuilder<T extends ExecutorSettingsBuilder<T>> {\n-\n- private final Settings.Builder builder;\n-\n- protected ExecutorSettingsBuilder(String name, ThreadPoolType threadPoolType) {\n- if (THREAD_POOL_TYPES.get(name) != threadPoolType) {\n- throw new IllegalArgumentException(\"thread pool [\" + name + \"] must be of type [\" + threadPoolType + \"]\");\n- }\n- builder = Settings.builder();\n- builder.put(\"name\", name);\n- builder.put(\"type\", threadPoolType.getType());\n- }\n-\n- public T keepAlive(String keepAlive) {\n- return add(\"keep_alive\", keepAlive);\n- }\n-\n- public T queueSize(int queueSize) {\n- return add(\"queue_size\", queueSize);\n- }\n-\n- protected T add(String setting, int value) {\n- return add(setting, Integer.toString(value));\n- }\n-\n-\n- protected T add(String setting, String value) {\n- builder.put(setting, value);\n- @SuppressWarnings(\"unchecked\") final T executor = (T)this;\n- return executor;\n- }\n-\n- public final Settings build() { return builder.build(); }\n-\n- }\n-\n- private static class FixedExecutorSettingsBuilder extends ExecutorSettingsBuilder<FixedExecutorSettingsBuilder> {\n-\n- public FixedExecutorSettingsBuilder(String name) {\n- super(name, ThreadPoolType.FIXED);\n- }\n-\n- public FixedExecutorSettingsBuilder size(int size) {\n- return add(\"size\", Integer.toString(size));\n- }\n-\n- }\n-\n- private static class ScalingExecutorSettingsBuilder extends ExecutorSettingsBuilder<ScalingExecutorSettingsBuilder> {\n-\n- public ScalingExecutorSettingsBuilder(String name) {\n- super(name, ThreadPoolType.SCALING);\n- }\n-\n- public ScalingExecutorSettingsBuilder min(int min) {\n- return add(\"min\", min);\n- }\n-\n-\n- public ScalingExecutorSettingsBuilder size(int size) {\n- return add(\"size\", size);\n- }\n- }\n-\n- public static final Setting<Settings> THREADPOOL_GROUP_SETTING =\n- Setting.groupSetting(\"threadpool.\", Property.Dynamic, Property.NodeScope);\n-\n- private volatile Map<String, ExecutorHolder> executors;\n-\n- private final Map<String, Settings> defaultExecutorTypeSettings;\n-\n- private final Queue<ExecutorHolder> retiredExecutors = new ConcurrentLinkedQueue<>();\n+ private Map<String, ExecutorHolder> executors = new HashMap<>();\n \n private 
final ScheduledThreadPoolExecutor scheduler;\n \n private final EstimatedTimeThread estimatedTimeThread;\n \n- private final AtomicBoolean settingsListenerIsSet = new AtomicBoolean(false);\n-\n static final Executor DIRECT_EXECUTOR = command -> command.run();\n \n private final ThreadContext threadContext;\n \n- public ThreadPool(String name) {\n- this(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), name).build());\n+ private final Map<String, ExecutorBuilder> builders;\n+\n+ public Collection<ExecutorBuilder> builders() {\n+ return Collections.unmodifiableCollection(builders.values());\n }\n \n- public ThreadPool(Settings settings) {\n+ public static Setting<TimeValue> ESTIMATED_TIME_INTERVAL_SETTING =\n+ Setting.timeSetting(\"thread_pool.estimated_time_interval\", TimeValue.timeValueMillis(200), Setting.Property.NodeScope);\n+\n+ public ThreadPool(final Settings settings, final ExecutorBuilder<?>... customBuilders) {\n super(settings);\n \n- assert Node.NODE_NAME_SETTING.exists(settings) : \"ThreadPool's settings should contain a name\";\n- threadContext = new ThreadContext(settings);\n- Map<String, Settings> groupSettings = THREADPOOL_GROUP_SETTING.get(settings).getAsGroups();\n- validate(groupSettings);\n-\n- int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);\n- int halfProcMaxAt5 = halfNumberOfProcessorsMaxFive(availableProcessors);\n- int halfProcMaxAt10 = halfNumberOfProcessorsMaxTen(availableProcessors);\n- Map<String, Settings> defaultExecutorTypeSettings = new HashMap<>();\n- int genericThreadPoolMax = boundedBy(4 * availableProcessors, 128, 512);\n- add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.GENERIC).min(4).size(genericThreadPoolMax).keepAlive(\"30s\"));\n- add(defaultExecutorTypeSettings, new FixedExecutorSettingsBuilder(Names.INDEX).size(availableProcessors).queueSize(200));\n- add(defaultExecutorTypeSettings, new FixedExecutorSettingsBuilder(Names.BULK).size(availableProcessors).queueSize(50));\n- add(defaultExecutorTypeSettings, new FixedExecutorSettingsBuilder(Names.GET).size(availableProcessors).queueSize(1000));\n- add(defaultExecutorTypeSettings, new FixedExecutorSettingsBuilder(Names.SEARCH).size(((availableProcessors * 3) / 2) + 1).queueSize(1000));\n- add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.MANAGEMENT).min(1).size(5).keepAlive(\"5m\"));\n+ assert Node.NODE_NAME_SETTING.exists(settings);\n+\n+ final Map<String, ExecutorBuilder> builders = new HashMap<>();\n+ final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);\n+ final int halfProcMaxAt5 = halfNumberOfProcessorsMaxFive(availableProcessors);\n+ final int halfProcMaxAt10 = halfNumberOfProcessorsMaxTen(availableProcessors);\n+ final int genericThreadPoolMax = boundedBy(4 * availableProcessors, 128, 512);\n+ builders.put(Names.GENERIC, new ScalingExecutorBuilder(Names.GENERIC, 4, genericThreadPoolMax, TimeValue.timeValueSeconds(30)));\n+ builders.put(Names.INDEX, new FixedExecutorBuilder(settings, Names.INDEX, availableProcessors, 200));\n+ builders.put(Names.BULK, new FixedExecutorBuilder(settings, Names.BULK, availableProcessors, 50));\n+ builders.put(Names.GET, new FixedExecutorBuilder(settings, Names.GET, availableProcessors, 1000));\n+ builders.put(Names.SEARCH, new FixedExecutorBuilder(settings, Names.SEARCH, ((availableProcessors * 3) / 2) + 1, 1000));\n+ builders.put(Names.MANAGEMENT, new ScalingExecutorBuilder(Names.MANAGEMENT, 1, 5, TimeValue.timeValueMinutes(5)));\n // no queue as this 
means clients will need to handle rejections on listener queue even if the operation succeeded\n // the assumption here is that the listeners should be very lightweight on the listeners side\n- add(defaultExecutorTypeSettings, new FixedExecutorSettingsBuilder(Names.LISTENER).size(halfProcMaxAt10));\n- add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.FLUSH).min(1).size(halfProcMaxAt5).keepAlive(\"5m\"));\n- add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.REFRESH).min(1).size(halfProcMaxAt10).keepAlive(\"5m\"));\n- add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.WARMER).min(1).size(halfProcMaxAt5).keepAlive(\"5m\"));\n- add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.SNAPSHOT).min(1).size(halfProcMaxAt5).keepAlive(\"5m\"));\n- add(defaultExecutorTypeSettings, new FixedExecutorSettingsBuilder(Names.FORCE_MERGE).size(1));\n- add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.FETCH_SHARD_STARTED).min(1).size(availableProcessors * 2).keepAlive(\"5m\"));\n- add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.FETCH_SHARD_STORE).min(1).size(availableProcessors * 2).keepAlive(\"5m\"));\n+ builders.put(Names.LISTENER, new FixedExecutorBuilder(settings, Names.LISTENER, halfProcMaxAt10, -1));\n+ builders.put(Names.FLUSH, new ScalingExecutorBuilder(Names.FLUSH, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5)));\n+ builders.put(Names.REFRESH, new ScalingExecutorBuilder(Names.REFRESH, 1, halfProcMaxAt10, TimeValue.timeValueMinutes(5)));\n+ builders.put(Names.WARMER, new ScalingExecutorBuilder(Names.WARMER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5)));\n+ builders.put(Names.SNAPSHOT, new ScalingExecutorBuilder(Names.SNAPSHOT, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5)));\n+ builders.put(Names.FETCH_SHARD_STARTED, new ScalingExecutorBuilder(Names.FETCH_SHARD_STARTED, 1, 2 * availableProcessors, TimeValue.timeValueMinutes(5)));\n+ builders.put(Names.FORCE_MERGE, new FixedExecutorBuilder(settings, Names.FORCE_MERGE, 1, -1));\n+ builders.put(Names.FETCH_SHARD_STORE, new ScalingExecutorBuilder(Names.FETCH_SHARD_STORE, 1, 2 * availableProcessors, TimeValue.timeValueMinutes(5)));\n+ for (final ExecutorBuilder<?> builder : customBuilders) {\n+ if (builders.containsKey(builder.name())) {\n+ throw new IllegalArgumentException(\"builder with name [\" + builder.name() + \"] already exists\");\n+ }\n+ builders.put(builder.name(), builder);\n+ }\n+ this.builders = Collections.unmodifiableMap(builders);\n \n- this.defaultExecutorTypeSettings = unmodifiableMap(defaultExecutorTypeSettings);\n-\n- Map<String, ExecutorHolder> executors = new HashMap<>();\n- for (Map.Entry<String, Settings> executor : defaultExecutorTypeSettings.entrySet()) {\n- executors.put(executor.getKey(), build(executor.getKey(), groupSettings.get(executor.getKey()), executor.getValue()));\n- }\n+ threadContext = new ThreadContext(settings);\n \n- // Building custom thread pools\n- for (Map.Entry<String, Settings> entry : groupSettings.entrySet()) {\n- if (executors.containsKey(entry.getKey())) {\n- continue;\n+ final Map<String, ExecutorHolder> executors = new HashMap<>();\n+ for (@SuppressWarnings(\"unchecked\") final Map.Entry<String, ExecutorBuilder> entry : builders.entrySet()) {\n+ final ExecutorBuilder.ExecutorSettings executorSettings = entry.getValue().getSettings(settings);\n+ final ExecutorHolder executorHolder = entry.getValue().build(executorSettings, threadContext);\n+ if 
(executors.containsKey(executorHolder.info.getName())) {\n+ throw new IllegalStateException(\"duplicate executors with name [\" + executorHolder.info.getName() + \"] registered\");\n }\n- executors.put(entry.getKey(), build(entry.getKey(), entry.getValue(), Settings.EMPTY));\n+ logger.debug(\"created thread pool: \" + entry.getValue().formatInfo(executorHolder.info));\n+ executors.put(entry.getKey(), executorHolder);\n }\n \n executors.put(Names.SAME, new ExecutorHolder(DIRECT_EXECUTOR, new Info(Names.SAME, ThreadPoolType.DIRECT)));\n this.executors = unmodifiableMap(executors);\n+\n this.scheduler = new ScheduledThreadPoolExecutor(1, EsExecutors.daemonThreadFactory(settings, \"scheduler\"), new EsAbortPolicy());\n this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);\n this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);\n this.scheduler.setRemoveOnCancelPolicy(true);\n \n- TimeValue estimatedTimeInterval = settings.getAsTime(\"threadpool.estimated_time_interval\", TimeValue.timeValueMillis(200));\n+ TimeValue estimatedTimeInterval = ESTIMATED_TIME_INTERVAL_SETTING.get(settings);\n this.estimatedTimeThread = new EstimatedTimeThread(EsExecutors.threadName(settings, \"[timer]\"), estimatedTimeInterval.millis());\n this.estimatedTimeThread.start();\n }\n \n- public void setClusterSettings(ClusterSettings clusterSettings) {\n- if(settingsListenerIsSet.compareAndSet(false, true)) {\n- clusterSettings.addSettingsUpdateConsumer(THREADPOOL_GROUP_SETTING, this::updateSettings, (s) -> validate(s.getAsGroups()));\n- } else {\n- throw new IllegalStateException(\"the node settings listener was set more then once\");\n- }\n- }\n-\n public long estimatedTimeInMillis() {\n return estimatedTimeThread.estimatedTimeInMillis();\n }\n@@ -440,12 +349,6 @@ public void shutdownNow() {\n ((ThreadPoolExecutor) executor.executor()).shutdownNow();\n }\n }\n-\n- ExecutorHolder holder;\n- while ((holder = retiredExecutors.poll()) != null) {\n- ThreadPoolExecutor executor = (ThreadPoolExecutor) holder.executor();\n- executor.shutdownNow();\n- }\n }\n \n public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException {\n@@ -456,142 +359,10 @@ public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedE\n }\n }\n \n- ExecutorHolder holder;\n- while ((holder = retiredExecutors.poll()) != null) {\n- ThreadPoolExecutor executor = (ThreadPoolExecutor) holder.executor();\n- result &= executor.awaitTermination(timeout, unit);\n- }\n-\n estimatedTimeThread.join(unit.toMillis(timeout));\n return result;\n }\n \n- private ExecutorHolder build(String name, @Nullable Settings settings, Settings defaultSettings) {\n- return rebuild(name, null, settings, defaultSettings);\n- }\n-\n- private ExecutorHolder rebuild(String name, ExecutorHolder previousExecutorHolder, @Nullable Settings settings, Settings defaultSettings) {\n- if (Names.SAME.equals(name)) {\n- // Don't allow to change the \"same\" thread executor\n- return previousExecutorHolder;\n- }\n- if (settings == null) {\n- settings = Settings.Builder.EMPTY_SETTINGS;\n- }\n- Info previousInfo = previousExecutorHolder != null ? previousExecutorHolder.info : null;\n- String type = settings.get(\"type\", previousInfo != null ? 
previousInfo.getThreadPoolType().getType() : defaultSettings.get(\"type\"));\n- ThreadPoolType threadPoolType = ThreadPoolType.fromType(type);\n- ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(this.settings, name);\n- if (ThreadPoolType.DIRECT == threadPoolType) {\n- if (previousExecutorHolder != null) {\n- logger.debug(\"updating thread pool [{}], type [{}]\", name, type);\n- } else {\n- logger.debug(\"creating thread pool [{}], type [{}]\", name, type);\n- }\n- return new ExecutorHolder(DIRECT_EXECUTOR, new Info(name, threadPoolType));\n- } else if (ThreadPoolType.FIXED == threadPoolType) {\n- int defaultSize = defaultSettings.getAsInt(\"size\", EsExecutors.boundedNumberOfProcessors(settings));\n- SizeValue defaultQueueSize = getAsSizeOrUnbounded(defaultSettings, \"queue\", getAsSizeOrUnbounded(defaultSettings, \"queue_size\", null));\n-\n- if (previousExecutorHolder != null) {\n- assert previousInfo != null;\n- if (ThreadPoolType.FIXED == previousInfo.getThreadPoolType()) {\n- SizeValue updatedQueueSize = getAsSizeOrUnbounded(settings, \"capacity\", getAsSizeOrUnbounded(settings, \"queue\", getAsSizeOrUnbounded(settings, \"queue_size\", previousInfo.getQueueSize())));\n- if (Objects.equals(previousInfo.getQueueSize(), updatedQueueSize)) {\n- int updatedSize = applyHardSizeLimit(name, settings.getAsInt(\"size\", previousInfo.getMax()));\n- if (previousInfo.getMax() != updatedSize) {\n- logger.debug(\"updating thread pool [{}], type [{}], size [{}], queue_size [{}]\", name, type, updatedSize, updatedQueueSize);\n- // if you think this code is crazy: that's because it is!\n- if (updatedSize > previousInfo.getMax()) {\n- ((EsThreadPoolExecutor) previousExecutorHolder.executor()).setMaximumPoolSize(updatedSize);\n- ((EsThreadPoolExecutor) previousExecutorHolder.executor()).setCorePoolSize(updatedSize);\n- } else {\n- ((EsThreadPoolExecutor) previousExecutorHolder.executor()).setCorePoolSize(updatedSize);\n- ((EsThreadPoolExecutor) previousExecutorHolder.executor()).setMaximumPoolSize(updatedSize);\n- }\n- return new ExecutorHolder(previousExecutorHolder.executor(), new Info(name, threadPoolType, updatedSize, updatedSize, null, updatedQueueSize));\n- }\n- return previousExecutorHolder;\n- }\n- }\n- if (previousInfo.getMax() >= 0) {\n- defaultSize = previousInfo.getMax();\n- }\n- defaultQueueSize = previousInfo.getQueueSize();\n- }\n-\n- int size = applyHardSizeLimit(name, settings.getAsInt(\"size\", defaultSize));\n- SizeValue queueSize = getAsSizeOrUnbounded(settings, \"capacity\", getAsSizeOrUnbounded(settings, \"queue\", getAsSizeOrUnbounded(settings, \"queue_size\", defaultQueueSize)));\n- logger.debug(\"creating thread pool [{}], type [{}], size [{}], queue_size [{}]\", name, type, size, queueSize);\n- Executor executor = EsExecutors.newFixed(name, size, queueSize == null ? 
-1 : (int) queueSize.singles(), threadFactory, threadContext);\n- return new ExecutorHolder(executor, new Info(name, threadPoolType, size, size, null, queueSize));\n- } else if (ThreadPoolType.SCALING == threadPoolType) {\n- TimeValue defaultKeepAlive = defaultSettings.getAsTime(\"keep_alive\", timeValueMinutes(5));\n- int defaultMin = defaultSettings.getAsInt(\"min\", 1);\n- int defaultSize = defaultSettings.getAsInt(\"size\", EsExecutors.boundedNumberOfProcessors(settings));\n- final Integer queueSize = settings.getAsInt(\"queue_size\", defaultSettings.getAsInt(\"queue_size\", null));\n- if (queueSize != null) {\n- throw new IllegalArgumentException(\"thread pool [\" + name + \"] of type scaling can not have its queue re-sized but was [\" + queueSize + \"]\");\n- }\n- if (previousExecutorHolder != null) {\n- if (ThreadPoolType.SCALING == previousInfo.getThreadPoolType()) {\n- TimeValue updatedKeepAlive = settings.getAsTime(\"keep_alive\", previousInfo.getKeepAlive());\n- int updatedMin = settings.getAsInt(\"min\", previousInfo.getMin());\n- int updatedSize = settings.getAsInt(\"max\", settings.getAsInt(\"size\", previousInfo.getMax()));\n- if (!previousInfo.getKeepAlive().equals(updatedKeepAlive) || previousInfo.getMin() != updatedMin || previousInfo.getMax() != updatedSize) {\n- logger.debug(\"updating thread pool [{}], type [{}], keep_alive [{}]\", name, type, updatedKeepAlive);\n- if (!previousInfo.getKeepAlive().equals(updatedKeepAlive)) {\n- ((EsThreadPoolExecutor) previousExecutorHolder.executor()).setKeepAliveTime(updatedKeepAlive.millis(), TimeUnit.MILLISECONDS);\n- }\n- if (previousInfo.getMin() != updatedMin) {\n- ((EsThreadPoolExecutor) previousExecutorHolder.executor()).setCorePoolSize(updatedMin);\n- }\n- if (previousInfo.getMax() != updatedSize) {\n- ((EsThreadPoolExecutor) previousExecutorHolder.executor()).setMaximumPoolSize(updatedSize);\n- }\n- return new ExecutorHolder(previousExecutorHolder.executor(), new Info(name, threadPoolType, updatedMin, updatedSize, updatedKeepAlive, null));\n- }\n- return previousExecutorHolder;\n- }\n- if (previousInfo.getKeepAlive() != null) {\n- defaultKeepAlive = previousInfo.getKeepAlive();\n- }\n- if (previousInfo.getMin() >= 0) {\n- defaultMin = previousInfo.getMin();\n- }\n- if (previousInfo.getMax() >= 0) {\n- defaultSize = previousInfo.getMax();\n- }\n- }\n- TimeValue keepAlive = settings.getAsTime(\"keep_alive\", defaultKeepAlive);\n- int min = settings.getAsInt(\"min\", defaultMin);\n- int size = settings.getAsInt(\"max\", settings.getAsInt(\"size\", defaultSize));\n- if (previousExecutorHolder != null) {\n- logger.debug(\"updating thread pool [{}], type [{}], min [{}], size [{}], keep_alive [{}]\", name, type, min, size, keepAlive);\n- } else {\n- logger.debug(\"creating thread pool [{}], type [{}], min [{}], size [{}], keep_alive [{}]\", name, type, min, size, keepAlive);\n- }\n- Executor executor = EsExecutors.newScaling(name, min, size, keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory, threadContext);\n- return new ExecutorHolder(executor, new Info(name, threadPoolType, min, size, keepAlive, null));\n- }\n- throw new IllegalArgumentException(\"No type found [\" + type + \"], for [\" + name + \"]\");\n- }\n-\n- private int applyHardSizeLimit(String name, int size) {\n- int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);\n- if ((name.equals(Names.BULK) || name.equals(Names.INDEX)) && size > availableProcessors) {\n- // We use a hard max size for the indexing pools, because if too many threads enter 
Lucene's IndexWriter, it means\n- // too many segments written, too frequently, too much merging, etc:\n- // TODO: I would love to be loud here (throw an exception if you ask for a too-big size), but I think this is dangerous\n- // because on upgrade this setting could be in cluster state and hard for the user to correct?\n- logger.warn(\"requested thread pool size [{}] for [{}] is too large; setting to maximum [{}] instead\",\n- size, name, availableProcessors);\n- size = availableProcessors;\n- }\n-\n- return size;\n- }\n-\n /**\n * Constrains a value between minimum and maximum values\n * (inclusive).\n@@ -618,92 +389,6 @@ static int twiceNumberOfProcessors(int numberOfProcessors) {\n return boundedBy(2 * numberOfProcessors, 2, Integer.MAX_VALUE);\n }\n \n- private void updateSettings(Settings settings) {\n- Map<String, Settings> groupSettings = settings.getAsGroups();\n- if (groupSettings.isEmpty()) {\n- return;\n- }\n-\n- for (Map.Entry<String, Settings> executor : defaultExecutorTypeSettings.entrySet()) {\n- Settings updatedSettings = groupSettings.get(executor.getKey());\n- if (updatedSettings == null) {\n- continue;\n- }\n-\n- ExecutorHolder oldExecutorHolder = executors.get(executor.getKey());\n- ExecutorHolder newExecutorHolder = rebuild(executor.getKey(), oldExecutorHolder, updatedSettings, executor.getValue());\n- if (!oldExecutorHolder.equals(newExecutorHolder)) {\n- Map<String, ExecutorHolder> newExecutors = new HashMap<>(executors);\n- newExecutors.put(executor.getKey(), newExecutorHolder);\n- executors = unmodifiableMap(newExecutors);\n- if (!oldExecutorHolder.executor().equals(newExecutorHolder.executor()) && oldExecutorHolder.executor() instanceof EsThreadPoolExecutor) {\n- retiredExecutors.add(oldExecutorHolder);\n- ((EsThreadPoolExecutor) oldExecutorHolder.executor()).shutdown(new ExecutorShutdownListener(oldExecutorHolder));\n- }\n- }\n- }\n-\n- // Building custom thread pools\n- for (Map.Entry<String, Settings> entry : groupSettings.entrySet()) {\n- if (defaultExecutorTypeSettings.containsKey(entry.getKey())) {\n- continue;\n- }\n-\n- ExecutorHolder oldExecutorHolder = executors.get(entry.getKey());\n- ExecutorHolder newExecutorHolder = rebuild(entry.getKey(), oldExecutorHolder, entry.getValue(), Settings.EMPTY);\n- // Can't introduce new thread pools at runtime, because The oldExecutorHolder variable will be null in the\n- // case the settings contains a thread pool not defined in the initial settings in the constructor. 
The if\n- // statement will then fail and so this prevents the addition of new thread groups at runtime, which is desired.\n- if (!newExecutorHolder.equals(oldExecutorHolder)) {\n- Map<String, ExecutorHolder> newExecutors = new HashMap<>(executors);\n- newExecutors.put(entry.getKey(), newExecutorHolder);\n- executors = unmodifiableMap(newExecutors);\n- if (!oldExecutorHolder.executor().equals(newExecutorHolder.executor()) && oldExecutorHolder.executor() instanceof EsThreadPoolExecutor) {\n- retiredExecutors.add(oldExecutorHolder);\n- ((EsThreadPoolExecutor) oldExecutorHolder.executor()).shutdown(new ExecutorShutdownListener(oldExecutorHolder));\n- }\n- }\n- }\n- }\n-\n- private void validate(Map<String, Settings> groupSettings) {\n- for (String key : groupSettings.keySet()) {\n- if (!THREAD_POOL_TYPES.containsKey(key)) {\n- continue;\n- }\n- String type = groupSettings.get(key).get(\"type\");\n- ThreadPoolType correctThreadPoolType = THREAD_POOL_TYPES.get(key);\n- // TODO: the type equality check can be removed after #3760/#6732 are addressed\n- if (type != null && !correctThreadPoolType.getType().equals(type)) {\n- throw new IllegalArgumentException(\"setting \" + THREADPOOL_GROUP_SETTING.getKey() + key + \".type to \" + type + \" is not permitted; must be \" + correctThreadPoolType.getType());\n- }\n- }\n- }\n-\n- /**\n- * A thread pool size can also be unbounded and is represented by -1, which is not supported by SizeValue (which only supports positive numbers)\n- */\n- private SizeValue getAsSizeOrUnbounded(Settings settings, String setting, SizeValue defaultValue) throws SettingsException {\n- if (\"-1\".equals(settings.get(setting))) {\n- return null;\n- }\n- return parseSizeValue(settings.get(setting), defaultValue);\n- }\n-\n- class ExecutorShutdownListener implements EsThreadPoolExecutor.ShutdownListener {\n-\n- private ExecutorHolder holder;\n-\n- public ExecutorShutdownListener(ExecutorHolder holder) {\n- this.holder = holder;\n- }\n-\n- @Override\n- public void onTerminated() {\n- retiredExecutors.remove(holder);\n- }\n- }\n-\n class LoggingRunnable implements Runnable {\n \n private final Runnable runnable;", "filename": "core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java", "status": "modified" }, { "diff": "@@ -20,20 +20,25 @@\n package org.elasticsearch.threadpool;\n \n import org.elasticsearch.common.inject.AbstractModule;\n+import org.elasticsearch.common.settings.SettingsModule;\n \n-/**\n- *\n- */\n public class ThreadPoolModule extends AbstractModule {\n \n private final ThreadPool threadPool;\n \n- public ThreadPoolModule(ThreadPool threadPool) {\n+ public ThreadPoolModule(final ThreadPool threadPool) {\n this.threadPool = threadPool;\n }\n \n+ public void prepareSettings(SettingsModule settingsModule) {\n+ for (final ExecutorBuilder<?> builder : threadPool.builders()) {\n+ builder.getRegisteredSettings().forEach(settingsModule::registerSetting);\n+ }\n+ }\n+\n @Override\n protected void configure() {\n bind(ThreadPool.class).toInstance(threadPool);\n }\n+\n }", "filename": "core/src/main/java/org/elasticsearch/threadpool/ThreadPoolModule.java", "status": "modified" }, { "diff": "@@ -45,12 +45,12 @@ public class RejectionActionIT extends ESIntegTestCase {\n protected Settings nodeSettings(int nodeOrdinal) {\n return Settings.builder()\n .put(super.nodeSettings(nodeOrdinal))\n- .put(\"threadpool.search.size\", 1)\n- .put(\"threadpool.search.queue_size\", 1)\n- .put(\"threadpool.index.size\", 1)\n- .put(\"threadpool.index.queue_size\", 1)\n- 
.put(\"threadpool.get.size\", 1)\n- .put(\"threadpool.get.queue_size\", 1)\n+ .put(\"thread_pool.search.size\", 1)\n+ .put(\"thread_pool.search.queue_size\", 1)\n+ .put(\"thread_pool.index.size\", 1)\n+ .put(\"thread_pool.index.queue_size\", 1)\n+ .put(\"thread_pool.get.size\", 1)\n+ .put(\"thread_pool.get.queue_size\", 1)\n .build();\n }\n ", "filename": "core/src/test/java/org/elasticsearch/action/RejectionActionIT.java", "status": "modified" }, { "diff": "@@ -42,6 +42,7 @@\n import org.elasticsearch.tasks.TaskManager;\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.test.tasks.MockTaskManager;\n+import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.TransportService;\n import org.elasticsearch.transport.local.LocalTransport;\n@@ -72,7 +73,7 @@ public abstract class TaskManagerTestCase extends ESTestCase {\n \n @BeforeClass\n public static void beforeClass() {\n- threadPool = new ThreadPool(TransportTasksActionTests.class.getSimpleName());\n+ threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName());\n }\n \n @AfterClass", "filename": "core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java", "status": "modified" }, { "diff": "@@ -52,10 +52,10 @@ protected Settings nodeSettings(int nodeOrdinal) {\n .put(super.nodeSettings(nodeOrdinal))\n // don't mess with this one! It's quite sensitive to a low queue size\n // (see also ThreadedActionListener which is happily spawning threads even when we already got rejected)\n- //.put(\"threadpool.listener.queue_size\", 1)\n- .put(\"threadpool.get.queue_size\", 1)\n+ //.put(\"thread_pool.listener.queue_size\", 1)\n+ .put(\"thread_pool.get.queue_size\", 1)\n // default is 50\n- .put(\"threadpool.bulk.queue_size\", 30)\n+ .put(\"thread_pool.bulk.queue_size\", 30)\n .build();\n }\n ", "filename": "core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java", "status": "modified" }, { "diff": "@@ -38,6 +38,7 @@\n import org.elasticsearch.tasks.Task;\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.test.transport.CapturingTransport;\n+import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.TransportService;\n import org.junit.After;\n@@ -63,7 +64,7 @@ public class TransportBulkActionTookTests extends ESTestCase {\n \n @BeforeClass\n public static void beforeClass() {\n- threadPool = new ThreadPool(\"TransportBulkActionTookTests\");\n+ threadPool = new TestThreadPool(\"TransportBulkActionTookTests\");\n }\n \n @AfterClass", "filename": "core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java", "status": "modified" }, { "diff": "@@ -21,6 +21,7 @@\n import org.elasticsearch.action.ActionListener;\n import org.elasticsearch.common.util.concurrent.AbstractRunnable;\n import org.elasticsearch.test.ESTestCase;\n+import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.Transports;\n \n@@ -31,7 +32,7 @@\n public class ListenableActionFutureTests extends ESTestCase {\n \n public void testListenerIsCallableFromNetworkThreads() throws Throwable {\n- ThreadPool threadPool = new ThreadPool(\"testListenerIsCallableFromNetworkThreads\");\n+ ThreadPool threadPool = new TestThreadPool(\"testListenerIsCallableFromNetworkThreads\");\n try {\n final PlainListenableActionFuture<Object> 
future = new PlainListenableActionFuture<>(threadPool);\n final CountDownLatch listenerCalled = new CountDownLatch(1);", "filename": "core/src/test/java/org/elasticsearch/action/support/ListenableActionFutureTests.java", "status": "modified" }, { "diff": "@@ -55,6 +55,7 @@\n import org.elasticsearch.rest.RestStatus;\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.test.transport.CapturingTransport;\n+import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.TransportChannel;\n import org.elasticsearch.transport.TransportResponse;\n@@ -182,7 +183,7 @@ public String[] concreteIndexNames(ClusterState state, IndicesRequest request) {\n \n @BeforeClass\n public static void startThreadPool() {\n- THREAD_POOL = new ThreadPool(TransportBroadcastByNodeActionTests.class.getSimpleName());\n+ THREAD_POOL = new TestThreadPool(TransportBroadcastByNodeActionTests.class.getSimpleName());\n }\n \n @Before", "filename": "core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java", "status": "modified" }, { "diff": "@@ -45,6 +45,7 @@\n import org.elasticsearch.tasks.Task;\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.test.transport.CapturingTransport;\n+import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.ConnectTransportException;\n import org.elasticsearch.transport.TransportService;\n@@ -76,7 +77,7 @@ public class TransportMasterNodeActionTests extends ESTestCase {\n \n @BeforeClass\n public static void beforeClass() {\n- threadPool = new ThreadPool(\"TransportMasterNodeActionTests\");\n+ threadPool = new TestThreadPool(\"TransportMasterNodeActionTests\");\n }\n \n @Override", "filename": "core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java", "status": "modified" }, { "diff": "@@ -35,6 +35,7 @@\n import org.elasticsearch.common.transport.DummyTransportAddress;\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.test.transport.CapturingTransport;\n+import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.TransportService;\n import org.junit.After;\n@@ -162,7 +163,7 @@ private enum NodeSelector {\n \n @BeforeClass\n public static void startThreadPool() {\n- THREAD_POOL = new ThreadPool(TransportBroadcastByNodeActionTests.class.getSimpleName());\n+ THREAD_POOL = new TestThreadPool(TransportBroadcastByNodeActionTests.class.getSimpleName());\n }\n \n @AfterClass", "filename": "core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java", "status": "modified" }, { "diff": "@@ -45,6 +45,7 @@\n import org.elasticsearch.rest.RestStatus;\n import org.elasticsearch.tasks.Task;\n import org.elasticsearch.test.ESTestCase;\n+import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.TransportService;\n import org.elasticsearch.transport.local.LocalTransport;\n@@ -81,7 +82,7 @@ public class BroadcastReplicationTests extends ESTestCase {\n \n @BeforeClass\n public static void beforeClass() {\n- threadPool = new ThreadPool(\"BroadcastReplicationTests\");\n+ threadPool = new TestThreadPool(\"BroadcastReplicationTests\");\n circuitBreakerService = new NoneCircuitBreakerService();\n }\n ", "filename": 
"core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java", "status": "modified" }, { "diff": "@@ -59,6 +59,7 @@\n import org.elasticsearch.test.ESAllocationTestCase;\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.test.transport.CapturingTransport;\n+import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.TransportChannel;\n import org.elasticsearch.transport.TransportException;\n@@ -111,7 +112,7 @@ public class TransportReplicationActionTests extends ESTestCase {\n \n @BeforeClass\n public static void beforeClass() {\n- threadPool = new ThreadPool(\"ShardReplicationTests\");\n+ threadPool = new TestThreadPool(\"ShardReplicationTests\");\n }\n \n @Override", "filename": "core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java", "status": "modified" }, { "diff": "@@ -43,6 +43,7 @@\n import org.elasticsearch.rest.RestStatus;\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.test.transport.CapturingTransport;\n+import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.ConnectTransportException;\n import org.elasticsearch.transport.TransportException;\n@@ -133,7 +134,7 @@ public String[] concreteIndexNames(ClusterState state, IndicesRequest request) {\n \n @BeforeClass\n public static void startThreadPool() {\n- THREAD_POOL = new ThreadPool(TransportInstanceSingleOperationActionTests.class.getSimpleName());\n+ THREAD_POOL = new TestThreadPool(TransportInstanceSingleOperationActionTests.class.getSimpleName());\n }\n \n @Before", "filename": "core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java", "status": "modified" }, { "diff": "@@ -28,6 +28,7 @@\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.transport.LocalTransportAddress;\n import org.elasticsearch.test.ESTestCase;\n+import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.BaseTransportResponseHandler;\n import org.elasticsearch.transport.TransportException;\n@@ -63,7 +64,7 @@ private static class TestIteration implements Closeable {\n \n TestIteration() {\n ClusterName clusterName = new ClusterName(\"test\");\n- threadPool = new ThreadPool(\"transport-client-nodes-service-tests\");\n+ threadPool = new TestThreadPool(\"transport-client-nodes-service-tests\");\n transport = new FailAndRetryMockTransport<TestResponse>(random(), clusterName) {\n @Override\n public List<String> getLocalAddresses() {", "filename": "core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java", "status": "modified" }, { "diff": "@@ -38,6 +38,7 @@\n import org.elasticsearch.discovery.Discovery;\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.test.transport.CapturingTransport;\n+import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.NodeDisconnectedException;\n import org.elasticsearch.transport.NodeNotConnectedException;\n@@ -97,7 +98,7 @@ protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver\n \n @BeforeClass\n public static void startThreadPool() {\n- THREAD_POOL = new ThreadPool(\"ShardStateActionTest\");\n+ THREAD_POOL = new 
TestThreadPool(\"ShardStateActionTest\");\n }\n \n @Override", "filename": "core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java", "status": "modified" }, { "diff": "@@ -41,6 +41,7 @@\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.test.gateway.NoopGatewayAllocator;\n import org.elasticsearch.test.transport.CapturingTransport;\n+import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.TransportService;\n import org.junit.After;\n@@ -72,7 +73,7 @@ public class ClusterStateHealthTests extends ESTestCase {\n \n @BeforeClass\n public static void beforeClass() {\n- threadPool = new ThreadPool(\"ClusterStateHealthTests\");\n+ threadPool = new TestThreadPool(\"ClusterStateHealthTests\");\n }\n \n @Override", "filename": "core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java", "status": "modified" }, { "diff": "@@ -37,6 +37,7 @@\n import org.elasticsearch.common.unit.TimeValue;\n import org.elasticsearch.gateway.GatewayAllocator;\n import org.elasticsearch.test.ESAllocationTestCase;\n+import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.junit.After;\n import org.junit.Before;\n@@ -73,7 +74,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {\n \n @Before\n public void createDelayedAllocationService() {\n- threadPool = new ThreadPool(getTestName());\n+ threadPool = new TestThreadPool(getTestName());\n clusterService = mock(ClusterService.class);\n allocationService = createAllocationService(Settings.EMPTY, new DelayedShardsMockGatewayAllocator());\n delayedAllocationService = new TestDelayAllocationService(Settings.EMPTY, threadPool, clusterService, allocationService);", "filename": "core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java", "status": "modified" }, { "diff": "@@ -42,6 +42,7 @@\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.test.MockLogAppender;\n import org.elasticsearch.test.junit.annotations.TestLogging;\n+import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.junit.After;\n import org.junit.AfterClass;\n@@ -76,7 +77,7 @@ public class ClusterServiceTests extends ESTestCase {\n \n @BeforeClass\n public static void createThreadPool() {\n- threadPool = new ThreadPool(ClusterServiceTests.class.getName());\n+ threadPool = new TestThreadPool(ClusterServiceTests.class.getName());\n }\n \n @AfterClass", "filename": "core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java", "status": "modified" }, { "diff": "@@ -22,6 +22,7 @@\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.unit.TimeValue;\n import org.elasticsearch.test.ESTestCase;\n+import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n \n import java.util.ArrayList;\n@@ -244,7 +245,7 @@ public void run() {\n }\n \n public void testTimeoutCleanup() throws Exception {\n- ThreadPool threadPool = new ThreadPool(\"test\");\n+ ThreadPool threadPool = new TestThreadPool(\"test\");\n final ScheduledThreadPoolExecutor timer = (ScheduledThreadPoolExecutor) threadPool.scheduler();\n final AtomicBoolean timeoutCalled = new AtomicBoolean();\n PrioritizedEsThreadPoolExecutor executor = EsExecutors.newSinglePrioritizing(getTestName(), EsExecutors.daemonThreadFactory(getTestName()), 
holder);", "filename": "core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java", "status": "modified" }, { "diff": "@@ -39,6 +39,7 @@\n import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.test.transport.MockTransportService;\n+import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.TransportConnectionListener;\n import org.elasticsearch.transport.TransportRequestOptions;\n@@ -85,7 +86,7 @@ public void setUp() throws Exception {\n .put(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), new ByteSizeValue(0))\n .build();\n ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);\n- threadPool = new ThreadPool(getClass().getName());\n+ threadPool = new TestThreadPool(getClass().getName());\n clusterServiceA = createClusterService(threadPool);\n clusterServiceB = createClusterService(threadPool);\n circuitBreakerService = new HierarchyCircuitBreakerService(settings, clusterSettings);", "filename": "core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java", "status": "modified" }, { "diff": "@@ -43,6 +43,7 @@\n import org.elasticsearch.discovery.zen.membership.MembershipAction;\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.test.junit.annotations.TestLogging;\n+import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.junit.After;\n import org.junit.AfterClass;\n@@ -81,7 +82,7 @@ public class NodeJoinControllerTests extends ESTestCase {\n \n @BeforeClass\n public static void beforeClass() {\n- threadPool = new ThreadPool(\"ShardReplicationTests\");\n+ threadPool = new TestThreadPool(\"ShardReplicationTests\");\n }\n \n @AfterClass", "filename": "core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java", "status": "modified" }, { "diff": "@@ -37,6 +37,7 @@\n import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.test.VersionUtils;\n+import org.elasticsearch.threadpool.TestThreadPool;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.TransportConnectionListener;\n import org.elasticsearch.transport.TransportService;\n@@ -60,7 +61,7 @@ public void testSimplePings() throws InterruptedException {\n int endPort = startPort + 10;\n settings = Settings.builder().put(settings).put(TransportSettings.PORT.getKey(), startPort + \"-\" + endPort).build();\n \n- ThreadPool threadPool = new ThreadPool(getClass().getName());\n+ ThreadPool threadPool = new TestThreadPool(getClass().getName());\n ClusterName test = new ClusterName(\"test\");\n ClusterName mismatch = new ClusterName(\"mismatch\");\n NetworkService networkService = new NetworkService(settings);", "filename": "core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java", "status": "modified" } ] }
{ "body": "<!--\nGitHub is reserved for bug reports and feature requests. The best place\nto ask a general question is at the Elastic Discourse forums at\nhttps://discuss.elastic.co. If you are in fact posting a bug report or\na feature request, please include one and only one of the below blocks\nin your new issue.\n-->\n\n<!--\nIf you are filing a bug report, please remove the below feature\nrequest block and provide responses for all of the below items.\n-->\n\n**Elasticsearch version**: 5.0.0-alpha2\n\n**JVM version**: 1.8.0_91(1.8.0_91-b14)\n\n**OS version**: Red Hat Enterprise Linux 7.2 (3.10.0-327.18.2.el7.x86_64)\n\n**Description of the problem including expected versus actual behavior**:\nI am trying to use AWS IAM role with Elasticsearch 5.0.0-alpha2 and EC2 discovery plug-in, but it does not seem to be working and I am getting below error:\n\n> \"Exception while retrieving instance list from AWS API: Authorization header or parameters are not formatted correctly. (Service: AmazonEC2; Status Code: 401; Error Code: AuthFailure\"\n\nI am using below configuration with jdk8:\n\n> cluster.name: \"test-cluster\"\n> cloud.aws.region: \"us-west-2\"\n> cloud.aws.ec2.region: \"us-west-2\"\n> cloud.aws.ec2.protocol: \"http\"\n> discovery.type: \"ec2\"\n> # bootstrap.mlockall: true\n> \n> node.master: true\n> node.data: false\n> node.name: ${HOSTNAME}-Master\n> discovery.zen.minimum_master_nodes: 1\n> network.host: ec2:privateIp\n> discovery.ec2.any_group: true\n> discovery.ec2.groups : sg-9d856tfe\n\nAnd, below is IAM role permission that I have configured with elasticsearch instance:\n\n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Action\": [\n \"ec2:Describe*\"\n ],\n \"Effect\": \"Allow\",\n \"Resource\": [\n \"*\"\n ]\n }\n ]\n}\n```\n\nHowever, same configuration works fine with with Elasticsearch 2.3 version. Has anyone faced the same issue with the latest Elasticsearch version 5.0.0-alpha2?\n\nAlso, today I enabled the debug mode of AWS call and I could notice(see below) that its loading credentials from **StaticCredentialsProvider** - its wrong behavior. As access and secret key is absent in the config(elasticsearch.yml) file, so ideally it should load credential from **InstanceProfileCredentialsProvider**.\n\n> [DEBUG][com.amazonaws.auth.AWSCredentialsProviderChain] Loading credentials from com.amazonaws.internal.StaticCredentialsProvider@40bf7b26\n\n**Steps to reproduce**:\n1. Start elasticsearch master data node with the above mentioned configuration.\n\n**Provide logs (if relevant)**:\n\n```\ncom.amazonaws.AmazonServiceException: Authorization header or parameters are not formatted correctly. 
(Service: AmazonEC2; Status Code: 401; Error Code: AuthFailure; Request ID: )\n at com.amazonaws.http.AmazonHttpClient.handleErrorResponse(AmazonHttpClient.java:1239)\n at com.amazonaws.http.AmazonHttpClient.executeOneRequest(AmazonHttpClient.java:823)\n at com.amazonaws.http.AmazonHttpClient.executeHelper(AmazonHttpClient.java:506)\n at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:318)\n at com.amazonaws.services.ec2.AmazonEC2Client.invoke(AmazonEC2Client.java:11901)\n at com.amazonaws.services.ec2.AmazonEC2Client.describeInstances(AmazonEC2Client.java:5940)\n at org.elasticsearch.discovery.ec2.AwsEc2UnicastHostsProvider.fetchDynamicNodes(AwsEc2UnicastHostsProvider.java:117)\n at org.elasticsearch.discovery.ec2.AwsEc2UnicastHostsProvider$DiscoNodesCache.refresh(AwsEc2UnicastHostsProvider.java:232)\n at org.elasticsearch.discovery.ec2.AwsEc2UnicastHostsProvider$DiscoNodesCache.refresh(AwsEc2UnicastHostsProvider.java:217)\n at org.elasticsearch.common.util.SingleObjectCache.getOrRefresh(SingleObjectCache.java:54)\n at org.elasticsearch.discovery.ec2.AwsEc2UnicastHostsProvider.buildDynamicNodes(AwsEc2UnicastHostsProvider.java:103)\n at org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing.sendPings(UnicastZenPing.java:344)\n at org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing.ping(UnicastZenPing.java:249)\n at org.elasticsearch.discovery.zen.ping.ZenPingService.ping(ZenPingService.java:106)\n at org.elasticsearch.discovery.zen.ping.ZenPingService.pingAndWait(ZenPingService.java:84)\n at org.elasticsearch.discovery.zen.ZenDiscovery.findMaster(ZenDiscovery.java:845)\n at org.elasticsearch.discovery.zen.ZenDiscovery.innerJoinCluster(ZenDiscovery.java:376)\n at org.elasticsearch.discovery.zen.ZenDiscovery.access$4500(ZenDiscovery.java:89)\n at org.elasticsearch.discovery.zen.ZenDiscovery$JoinThreadControl$1.run(ZenDiscovery.java:1166)\n at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:392)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\n```\n", "comments": [ { "body": "Thanks Alex for fixing this issue. Looks like that was the issue even though key was empty, but not null, it was going to else flow and looking for credentials in **StaticCredentialsProvider**.\n\nLet me re-install the **discovery-ec2** plug-in and test this. I will let you know, if any issue. Thanks again.\n", "created_at": "2016-05-31T16:17:34Z" }, { "body": "Hi Alex,\nHow can I test your fix? I have removed the discovery-ec2 plug-in and installed again(as shown below), but getting the same error as above.\n\n> bin/elasticsearch-plugin remove discovery-ec2\n> bin/elasticsearch-plugin install discovery-ec2\n\nCan you please help me here so that I can get the latest change w.r.t. discovery-ec2 plug-in? Thanks.\n", "created_at": "2016-05-31T16:40:10Z" }, { "body": "Thanks a lot @randhirkr. I added you to the pioneer program.\n", "created_at": "2016-05-31T17:18:48Z" }, { "body": "@dadoonet do we have to be added to the pioneer program to get these fixes? If so, I'd like to get on too as I'm facing this issue as well. Also, what additional configuration will I have to setup WRT the pioneer program? Thanks. \n", "created_at": "2016-07-14T20:08:17Z" }, { "body": "The PR has not been merged yet so this is not fixed. 
:(\n", "created_at": "2016-07-14T20:43:58Z" }, { "body": "@dexterous see https://www.elastic.co/blog/elastic-pioneer-program\n", "created_at": "2016-07-15T08:45:14Z" }, { "body": "@randhirkr I am having the exact same problem, what is the fix.\n", "created_at": "2016-11-04T14:39:15Z" } ], "number": 18652, "title": "AWS IAM role not working with Elasticsearch 5.0.0-alpha2, but works with 2.3 version" }
{ "body": "`cloud.aws.ec2.access_key` has fallback setting `cloud.aws.access_key`.\n\n```\nSetting<String> KEY_SETTING = new Setting<>(\"cloud.aws.ec2.access_key\", AwsEc2Service.KEY_SETTING, Function.identity(),\n Property.NodeScope, Property.Filtered);\n```\n\n`cloud.aws.access_key`'s default value is empty string.\n\n```\nSetting<String> KEY_SETTING =\n Setting.simpleString(\"cloud.aws.access_key\", Property.NodeScope, Property.Filtered);\n```\n\n```\npublic static Setting<String> simpleString(String key, Property... properties) {\n return new Setting<>(key, s -> \"\", Function.identity(), properties);\n }\n```\n\nSame for `cloud.aws.ec2.secret_key`\n\nCloses #18652\n", "number": 18662, "review_comments": [ { "body": "If the defaults are actually empty string, then we should not be checking/allowing for null. Let the NPE happen: if it occurs, it is an internal bug. Otherwise the bug would be hidden away by this leniency. Also, you changed the logic slightly, because now it is logicl OR instead of AND. I think it should stay AND?\n", "created_at": "2016-05-31T17:24:02Z" }, { "body": "Agreed. If key is provided, secret must be provided.\n", "created_at": "2016-05-31T17:26:24Z" } ], "title": "Fix EC2 discovery settings" }
{ "commits": [ { "message": "Fix EC2 discovery setting #18652" } ], "files": [ { "diff": "@@ -117,15 +117,15 @@ public long delayBeforeNextRetry(AmazonWebServiceRequest originalRequest,\n \n AWSCredentialsProvider credentials;\n \n- if (key == null && secret == null) {\n+ if (key.isEmpty() && secret.isEmpty()) {\n credentials = new AWSCredentialsProviderChain(\n- new EnvironmentVariableCredentialsProvider(),\n- new SystemPropertiesCredentialsProvider(),\n- new InstanceProfileCredentialsProvider()\n+ new EnvironmentVariableCredentialsProvider(),\n+ new SystemPropertiesCredentialsProvider(),\n+ new InstanceProfileCredentialsProvider()\n );\n } else {\n credentials = new AWSCredentialsProviderChain(\n- new StaticCredentialsProvider(new BasicAWSCredentials(key, secret))\n+ new StaticCredentialsProvider(new BasicAWSCredentials(key, secret))\n );\n }\n ", "filename": "plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java", "status": "modified" } ] }
{ "body": "I have the following dynamic mappings set up in default-mapping.json:\n\n``` javascript\n{\n \"_default_\": {\n \"_source\": {\n \"enabled\": false\n },\n \"_all\": {\n \"enabled\": false\n },\n \"dynamic_templates\": [\n {\n \"strings\": {\n \"match\": \"*\",\n \"match_mapping_type\": \"string\",\n \"mapping\": {\n \"type\": \"multi_field\",\n \"fields\": {\n \"{name}\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\",\n \"omit_norms\": true,\n \"omit_term_freq_and_positions\": true\n },\n \"lower\": {\n \"type\": \"string\",\n \"index\": \"analyzed\",\n \"analyzer\": \"lowercase\",\n \"omit_norms\": true,\n \"omit_term_freq_and_positions\": true\n }\n }\n }\n }\n },\n {\n \"everything\": {\n \"match\": \"*\",\n \"mapping\": {\n \"omit_norms\": true,\n \"omit_term_freq_and_positions\": true\n }\n }\n }\n ]\n }\n}\n```\n\nIf I understand this page correctly (at the very bottom):\n\nhttp://www.elasticsearch.org/guide/reference/mapping/root-object-type.html\n\nthe first mapping that matches will be applied -- i.e. \"everything\" should only be applied to fields that don't match \"strings\".\n\nHowever, this isn't what I see (in 0.19.8 anyway) -- no matter which order I put the mappings in, all fields have \"everything\" applied, including string fields.\n\n(By the way, \"lowercase\" is a simple custom analayzer with a keyword tokenizer and lowercase token filter.)\n", "comments": [ { "body": "Unfortunately, it seems the dynamic template list in org.elasticsearch.index.mapper.object.RootObjectMapper is derived from a map, fetched as Map<String,Object> from the given JSON source. So the keys in the map are not guaranteed to be ordered sequentially. I guess, internally, the entry \"everything\" is ordered before the entry \"strings\".\n\nMy suggestion is to add a positional attribute (\"position\") to the dynamic_templates entries so ES can order them more reliably according to the users preference. Patch wanted?\n", "created_at": "2012-11-12T15:03:56Z" }, { "body": "Another cause may be that equals() and hashcode() methods in org.elasticsearch.index.mapper.object.DynamicTemplate do not work as expected.\n", "created_at": "2012-11-12T15:13:57Z" }, { "body": "It occurs to me that my original example is a bit bogus anyway -- since the omit_norms and omit_term_freq_and_positions are only valid for string types anyway. But the general point still stands...\n", "created_at": "2012-11-12T15:32:46Z" }, { "body": "Hi, this one is tricky... . The order is actually properly maintained of the dynamic templates, so thats not the problem (the array in the dynamic templates denotes the order, and we respect that).\n\nThe problem is with how we resolve dynamic templates, specifically, with `match_on_type`. When we encounter a `string` type, we first try and match on a dynamic template by name, _without_ the type. Then, if we don't match on it, we try and guess the type of the string value (it can be a date, an attachment, or numbers if numeric auto detection is turned on or something like that). If its not of any specialized non string type, only then we try and match on a dynamic template with the name and the `string` type as well.\n\nThe reason for this behavior is actually down to JSON and binary values. Because binary values in json are strings, trying to auto detect a date for example by trying to convert it to string ends up screwing up the internal parser binary value (I need to check if thats the case still). 
So we first need to try and match on name without actually knowing the type, and then match on the type...\n\nWhat you see happens because in the initial match on name (without type), it ends up actually matching on the catch all `everything` one, and then its used. \n\nThis one requires some thinking, not an easy one to solve...\n", "created_at": "2012-11-13T14:12:05Z" }, { "body": "Is there any plan to fix this? On recent ElasticSearch version it still happens. Is there any other way to provide specific dynamic mapping template for strings and another template for all other types?\n", "created_at": "2013-08-08T12:25:08Z" }, { "body": "I've just run into this as well. I want strings within a subpath to be analyzed with a specific analyzer, and for all other types in the same subpath to be not_analyzed (but with some other changes for which I need a mapping defined -- for example, a set index_name). It seems because of this behavior I may need to put my strings into a different subpath. I was hoping to avoid making structure choices in my documents just so I can map it correctly.\n", "created_at": "2014-10-24T17:31:27Z" }, { "body": "Wondering if an `unmatch_mapping_type` will help here? Possibly combined with rules without `match_mapping_type` being placed below rules with a specified (or wildcard) `match_mapping_type`?\n", "created_at": "2014-11-28T18:35:41Z" }, { "body": "Recently came across this. The following is the use case:\n\nhttps://gist.github.com/ppf2/6da223f9517ddc0e9465\n\nIn this case, what appears to work (Test 2 in the gist) is if I add `\"match_mapping_type\": \"*\"` in addition to `\"match\": \"*\"` in the dynamic template mapping for the default/everything fields.\n", "created_at": "2015-03-23T21:40:55Z" }, { "body": "This is a sweet workaround. It appears working for me. thanks\n", "created_at": "2015-03-26T04:29:51Z" }, { "body": "Given @ppf2 's workaround in https://github.com/elastic/elasticsearch/issues/2401#issuecomment-85208238 it seems that we just need to default `match_mapping_type` to `*`?\n", "created_at": "2015-04-05T11:57:13Z" }, { "body": "+1\n\nThe workaround of adding `\"match_mapping_type\": \"*\"` to all fields works in the meantime.\n\n``` js\nPUT /_template/log_template\n{\n \"template\": \"log*\",\n \"mappings\": {\n \"_default_\": {\n \"dynamic_templates\": [\n {\n \"timestamp\": {\n \"match\": \"@timestamp\",\n \"match_mapping_type\": \"*\",\n \"mapping\": {\n \"type\": \"date\",\n \"index\": \"not_analyzed\",\n \"doc_values\": true\n }\n }\n },\n {\n \"string_multifield\": {\n \"match\": \"*\",\n \"match_mapping_type\": \"string\",\n \"mapping\": {\n \"type\": \"string\",\n \"fields\": {\n \"raw\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\",\n \"doc_values\": true\n }\n }\n }\n }\n },\n {\n \"catch_all\": {\n \"match\": \"*\",\n \"match_mapping_type\": \"*\",\n \"mapping\": {\n \"index\": \"not_analyzed\",\n \"doc_values\": true\n }\n }\n }\n ]\n }\n }\n}\n```\n", "created_at": "2015-04-08T16:51:43Z" }, { "body": "@kimchy can you expand on what you mean here:\n\n> The reason for this behavior is actually down to JSON and binary values. Because binary values in json are strings, trying to auto detect a date for example by trying to convert it to string ends up screwing up the internal parser binary value (I need to check if thats the case still). 
So we first need to try and match on name without actually knowing the type, and then match on the type...\n\nThis patch:\n\n```\n if (unmatch != null && patternMatch(unmatch, name)) {\n return false;\n }\n- if (matchMappingType != null) {\n- if (dynamicType == null) {\n- return false;\n- }\n- if (!patternMatch(matchMappingType, dynamicType)) {\n- return false;\n- }\n+ if (dynamicType == null) {\n+ return false;\n+ }\n+ if (matchMappingType != null && !patternMatch(matchMappingType, dynamicType)) {\n+ return false;\n }\n return true;\n }\n```\n\nseems to work fine with binary strings, eg:\n\n```\nDELETE test\n\nPUT test\n{\n \"mappings\": {\n \"_default_\": {\n \"_source\": {\n \"enabled\": false\n },\n \"dynamic_templates\": [\n {\n \"dates\": {\n \"match\": \"*\",\n \"match_mapping_type\": \"date\",\n \"mapping\": {\n \"type\": \"date\",\n \"format\": \"YYYY-mm-dd\"\n }\n }\n },\n {\n \"everything\": {\n \"match\": \"*\",\n \"mapping\": {\n \"type\": \"binary\",\n \"store\": true\n }\n }\n }\n ]\n }\n }\n}\n\nPUT test/test/1\n{\n \"binary\": \"QUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVoB\",\n \"date\": \"2014-01-01\"\n}\n\nGET /test/test/_mapping\n```\n\nreturns:\n\n```\n \"_source\": {\n \"enabled\": false\n },\n \"properties\": {\n \"binary\": {\n \"type\": \"binary\",\n \"store\": true\n },\n \"date\": {\n \"type\": \"date\",\n \"format\": \"YYYY-mm-dd\"\n }\n }\n```\n\nand\n\n```\nGET test/test/_search?fields=*\n```\n\nreturns:\n\n```\n \"fields\": {\n \"binary\": [\n \"QUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVoB\"\n ]\n }\n```\n\nAnd to update the original example, this seems to work correctly:\n\n```\nDELETE test\n\nPUT test\n{\n \"mappings\": {\n \"_default_\": {\n \"dynamic_templates\": [\n {\n \"strings\": {\n \"match\": \"*\",\n \"match_mapping_type\": \"string\",\n \"mapping\": {\n \"type\": \"string\",\n \"fields\": {\n \"raw\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n }\n }\n }\n }\n },\n {\n \"everything\": {\n \"match\": \"*\",\n \"mapping\": {\n \"type\": \"{dynamic_type}\",\n \"doc_values\": true\n }\n }\n }\n ]\n }\n }\n}\n\nPUT test/test/1\n{\n \"string\": \"bar\",\n \"bool\": true,\n \"date\": \"2014-01-01\",\n \"int\": 5\n}\n\nGET test/test/_mapping\n```\n\nreturns:\n\n```\n \"properties\": {\n \"bool\": {\n \"type\": \"boolean\",\n \"doc_values\": true\n },\n \"date\": {\n \"type\": \"date\",\n \"doc_values\": true,\n \"format\": \"dateOptionalTime\"\n },\n \"int\": {\n \"type\": \"long\",\n \"doc_values\": true\n },\n \"string\": {\n \"type\": \"string\",\n \"fields\": {\n \"raw\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n }\n }\n }\n```\n", "created_at": "2015-04-10T13:58:21Z" }, { "body": "Closed by https://github.com/elastic/elasticsearch/pull/18638\n", "created_at": "2016-05-31T10:13:23Z" } ], "number": 2401, "title": "Dynamic template with match_mapping_type ignored when there's a match \"*\" after it" }
{ "body": "When calling `findTemplateBuilder(context, currentFieldName, \"text\", null)`,\nelasticsearch ignores all templates that have a `match_mapping_type` set since\nno dynamic type is provided (the last parameter, which is null in that case).\nSo this should only be called _last_. Otherwise, if a path-based template\nmatches, it will have precedence over all type-based templates.\n\nCloses #18625\nCloses #2401\n", "number": 18638, "review_comments": [ { "body": "This code was initially added to fix #1418. But I checked and the bug does not reproduce without this block of code with recent versions of Jackson.\n", "created_at": "2016-05-30T14:36:40Z" } ], "title": "Process dynamic templates in order." }
{ "commits": [ { "message": "Process dynamic templates in order. #18638\n\nWhen calling `findTemplateBuilder(context, currentFieldName, \"text\", null)`,\nelasticsearch ignores all templates that have a `match_mapping_type` set since\nno dynamic type is provided (the last parameter, which is null in that case).\nSo this should only be called _last_. Otherwise, if a path-based template\nmatches, it will have precedence over all type-based templates.\n\nCloses #18625" } ], "files": [ { "diff": "@@ -676,16 +676,6 @@ private static Mapper.Builder<?,?> createBuilderFromFieldType(final ParseContext\n \n private static Mapper.Builder<?,?> createBuilderFromDynamicValue(final ParseContext context, XContentParser.Token token, String currentFieldName) throws IOException {\n if (token == XContentParser.Token.VALUE_STRING) {\n- // do a quick test to see if its fits a dynamic template, if so, use it.\n- // we need to do it here so we can handle things like attachment templates, where calling\n- // text (to see if its a date) causes the binary value to be cleared\n- {\n- Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, \"text\", null);\n- if (builder != null) {\n- return builder;\n- }\n- }\n-\n if (context.root().dateDetection()) {\n String text = context.parser().text();\n // a safe check since \"1\" gets parsed as well", "filename": "core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java", "status": "modified" }, { "diff": "@@ -278,7 +278,7 @@ public Mapper.Builder findTemplateBuilder(ParseContext context, String name, Str\n return typeParser.parse(name, dynamicTemplate.mappingForName(name, dynamicType), parserContext);\n }\n \n- public DynamicTemplate findTemplate(ContentPath path, String name, String matchType) {\n+ private DynamicTemplate findTemplate(ContentPath path, String name, String matchType) {\n for (DynamicTemplate dynamicTemplate : dynamicTemplates) {\n if (dynamicTemplate.match(path, name, matchType)) {\n return dynamicTemplate;", "filename": "core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java", "status": "modified" }, { "diff": "@@ -34,6 +34,7 @@\n import org.elasticsearch.index.mapper.core.BooleanFieldMapper;\n import org.elasticsearch.index.mapper.core.BooleanFieldMapper.BooleanFieldType;\n import org.elasticsearch.index.mapper.core.DateFieldMapper;\n+import org.elasticsearch.index.mapper.core.KeywordFieldMapper;\n import org.elasticsearch.index.mapper.core.DateFieldMapper.DateFieldType;\n import org.elasticsearch.index.mapper.core.NumberFieldMapper;\n import org.elasticsearch.index.mapper.core.NumberFieldMapper.NumberFieldType;\n@@ -645,4 +646,32 @@ public void testNumericDetectionDefault() throws Exception {\n mapper = defaultMapper.mappers().smartNameFieldMapper(\"s_double\");\n assertThat(mapper, instanceOf(TextFieldMapper.class));\n }\n+\n+ public void testDynamicTemplateOrder() throws IOException {\n+ // https://github.com/elastic/elasticsearch/issues/18625\n+ // elasticsearch used to apply templates that do not have a match_mapping_type first\n+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .startArray(\"dynamic_templates\")\n+ .startObject()\n+ .startObject(\"type-based\")\n+ .field(\"match_mapping_type\", \"string\")\n+ .startObject(\"mapping\")\n+ .field(\"type\", \"keyword\")\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .startObject()\n+ .startObject(\"path-based\")\n+ .field(\"path_match\", \"foo\")\n+ .startObject(\"mapping\")\n+ 
.field(\"type\", \"long\")\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .endArray()\n+ .endObject().endObject();\n+ IndexService index = createIndex(\"test\", Settings.EMPTY, \"type\", mapping);\n+ client().prepareIndex(\"test\", \"type\", \"1\").setSource(\"foo\", \"abc\").get();\n+ assertThat(index.mapperService().fullName(\"foo\"), instanceOf(KeywordFieldMapper.KeywordFieldType.class));\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java", "status": "modified" } ] }
{ "body": "In the 2.x branch, the `TransportNodesAction` class defines two methods:\n\n```\n protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) {\n return nodesIds;\n }\n\n protected String[] resolveNodes(NodesRequest request, ClusterState clusterState) {\n return clusterState.nodes().resolveNodesIds(request.nodesIds());\n }\n```\n\nwhich basically return all the nodes in the cluster (`resolveNodes`) and just the nodes IDs received as parameter (`filterNodeIds`), respectively.\n\nThe constructor, though, does the following:\n\n```\n String[] nodesIds = resolveNodes(request, clusterState);\n this.nodesIds = filterNodeIds(clusterState.nodes(), nodesIds);\n ImmutableOpenMap<String, DiscoveryNode> nodes = clusterState.nodes().nodes();\n this.nodes = new DiscoveryNode[nodesIds.length];\n for (int i = 0; i < nodesIds.length; i++) {\n this.nodes[i] = nodes.get(nodesIds[i]);\n }\n this.responses = new AtomicReferenceArray<>(this.nodesIds.length);\n }\n```\n\nSo, the list of nodes - `this.nodes[i] = nodes.get(nodesIds[i]);` - is being built considering the result of `resolveNodes()` method (meaning all the nodes), not on the filtered list of nodes (the result of `filterNodeIds` method), which should be the desired behavior.\n", "comments": [], "number": 18618, "title": "Filtered list of nodes IDs in TransportNodesAction isn't actually considered" }
{ "body": "Don't mix up member variables with local variables\nin constructor.\n\ncloses #18618\n", "number": 18634, "review_comments": [ { "body": "maybe we should just get rid of this local variable and write the next line:\n\n```\nnodesIds = filterNodeIds(clusterState.nodes(), resolveNodes(request, clusterState));\n```\n", "created_at": "2016-05-30T11:25:56Z" }, { "body": "We should also get rid of this local variable (The map is not needed as `DiscoveryNodes` has a method `get()`).\n\nWe could write `this.nodes[i] = clusterState.nodes().get(nodesIds[i]);` a few lines below.\n\nIf `clusterState.nodes()` is too verbose, we can extract that one into a local variable.\n", "created_at": "2016-05-30T11:28:38Z" }, { "body": "please remove the additional `getNodes()` here :-)\n", "created_at": "2016-05-30T13:11:45Z" }, { "body": "ok, done\n", "created_at": "2016-05-30T13:16:40Z" } ], "title": "Fix filtering of node ids for TransportNodesAction" }
{ "commits": [ { "message": "Fix filtering of node ids for TransportNodesAction\n\nDon't mix up member variables with local variables\nin constructor.\n\ncloses #18618" }, { "message": "remove local variables" }, { "message": "remove superfluous getNodes()" } ], "files": [ { "diff": "@@ -176,12 +176,10 @@ class AsyncAction {\n this.request = request;\n this.listener = listener;\n ClusterState clusterState = clusterService.state();\n- String[] nodesIds = resolveNodes(request, clusterState);\n- this.nodesIds = filterNodeIds(clusterState.nodes(), nodesIds);\n- ImmutableOpenMap<String, DiscoveryNode> nodes = clusterState.nodes().getNodes();\n+ nodesIds = filterNodeIds(clusterState.nodes(), resolveNodes(request, clusterState));\n this.nodes = new DiscoveryNode[nodesIds.length];\n for (int i = 0; i < nodesIds.length; i++) {\n- this.nodes[i] = nodes.get(nodesIds[i]);\n+ this.nodes[i] = clusterState.nodes().get(nodesIds[i]);\n }\n this.responses = new AtomicReferenceArray<>(this.nodesIds.length);\n }", "filename": "core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java", "status": "modified" }, { "diff": "@@ -66,9 +66,10 @@ public class TransportNodesActionTests extends ESTestCase {\n \n private ClusterService clusterService;\n private CapturingTransport transport;\n- private TestTransportNodesAction action;\n+ private TransportService transportService;\n \n public void testRequestIsSentToEachNode() throws Exception {\n+ TransportNodesAction action = getTestTransportNodesAction();\n TestNodesRequest request = new TestNodesRequest();\n PlainActionFuture<TestNodesResponse> listener = new PlainActionFuture<>();\n action.new AsyncAction(null, request, listener).start();\n@@ -79,6 +80,7 @@ public void testRequestIsSentToEachNode() throws Exception {\n }\n \n public void testNodesSelectors() {\n+ TransportNodesAction action = getTestTransportNodesAction();\n int numSelectors = randomIntBetween(1, 5);\n Set<String> nodeSelectors = new HashSet<>();\n for (int i = 0; i < numSelectors; i++) {\n@@ -98,10 +100,12 @@ public void testNodesSelectors() {\n }\n \n public void testNewResponseNullArray() {\n+ TransportNodesAction action = getTestTransportNodesAction();\n expectThrows(NullPointerException.class, () -> action.newResponse(new TestNodesRequest(), null));\n }\n \n public void testNewResponse() {\n+ TestTransportNodesAction action = getTestTransportNodesAction();\n TestNodesRequest request = new TestNodesRequest();\n List<TestNodeResponse> expectedNodeResponses = mockList(TestNodeResponse.class, randomIntBetween(0, 2));\n expectedNodeResponses.add(new TestNodeResponse());\n@@ -125,6 +129,19 @@ public void testNewResponse() {\n assertTrue(failures.containsAll(response.failures()));\n }\n \n+ public void testFiltering() throws Exception {\n+ TransportNodesAction action = getFilteringTestTransportNodesAction(transportService);\n+ TestNodesRequest request = new TestNodesRequest();\n+ PlainActionFuture<TestNodesResponse> listener = new PlainActionFuture<>();\n+ action.new AsyncAction(null, request, listener).start();\n+ Map<String, List<CapturingTransport.CapturedRequest>> capturedRequests = transport.getCapturedRequestsByTargetNodeAndClear();\n+ // check requests were only sent to data nodes\n+ for (String nodeTarget : capturedRequests.keySet()) {\n+ assertTrue(clusterService.state().nodes().get(nodeTarget).isDataNode());\n+ }\n+ assertEquals(clusterService.state().nodes().getDataNodes().size(), capturedRequests.size());\n+ }\n+\n private <T> List<T> mockList(Class<T> clazz, int 
size) {\n List<T> failures = new ArrayList<>(size);\n for (int i = 0; i < size; ++i) {\n@@ -160,7 +177,7 @@ public void setUp() throws Exception {\n super.setUp();\n transport = new CapturingTransport();\n clusterService = createClusterService(THREAD_POOL);\n- final TransportService transportService = new TransportService(transport, THREAD_POOL, clusterService.state().getClusterName());\n+ transportService = new TransportService(transport, THREAD_POOL, clusterService.state().getClusterName());\n transportService.start();\n transportService.acceptIncomingRequests();\n int numNodes = randomIntBetween(3, 10);\n@@ -182,7 +199,17 @@ public void setUp() throws Exception {\n stateBuilder.nodes(discoBuilder);\n ClusterState clusterState = stateBuilder.build();\n setState(clusterService, clusterState);\n- action = new TestTransportNodesAction(\n+ }\n+\n+ @After\n+ public void tearDown() throws Exception {\n+ super.tearDown();\n+ clusterService.close();\n+ transport.close();\n+ }\n+\n+ public TestTransportNodesAction getTestTransportNodesAction() {\n+ return new TestTransportNodesAction(\n Settings.EMPTY,\n THREAD_POOL,\n clusterService,\n@@ -194,11 +221,17 @@ public void setUp() throws Exception {\n );\n }\n \n- @After\n- public void tearDown() throws Exception {\n- super.tearDown();\n- clusterService.close();\n- transport.close();\n+ public FilteringTestTransportNodesAction getFilteringTestTransportNodesAction(TransportService transportService) {\n+ return new FilteringTestTransportNodesAction(\n+ Settings.EMPTY,\n+ THREAD_POOL,\n+ clusterService,\n+ transportService,\n+ new ActionFilters(Collections.emptySet()),\n+ TestNodesRequest::new,\n+ TestNodeRequest::new,\n+ ThreadPool.Names.SAME\n+ );\n }\n \n private static DiscoveryNode newNode(int nodeId, Map<String, String> attributes, Set<DiscoveryNode.Role> roles) {\n@@ -243,6 +276,21 @@ protected boolean accumulateExceptions() {\n }\n }\n \n+ private static class FilteringTestTransportNodesAction\n+ extends TestTransportNodesAction {\n+\n+ FilteringTestTransportNodesAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService\n+ transportService, ActionFilters actionFilters, Supplier<TestNodesRequest> request,\n+ Supplier<TestNodeRequest> nodeRequest, String nodeExecutor) {\n+ super(settings, threadPool, clusterService, transportService, actionFilters, request, nodeRequest, nodeExecutor);\n+ }\n+\n+ @Override\n+ protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) {\n+ return nodes.getDataNodes().keys().toArray(String.class);\n+ }\n+ }\n+\n private static class TestNodesRequest extends BaseNodesRequest<TestNodesRequest> {\n TestNodesRequest(String... nodesIds) {\n super(nodesIds);", "filename": "core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java", "status": "modified" } ] }
{ "body": "I have found two test failures in `InternalEngineTests` that consistently fail:\n\n1) `testDocStats`\n\nReproduces with:\n`gradle :core:test -Dtests.seed=19501DA878395CCF -Dtests.class=org.elasticsearch.index.engine.InternalEngineTests -Dtests.method=\"testDocStats\" -Dtests.security.manager=true -Dtests.locale=ar-SY -Dtests.timezone=Europe/Helsinki`\n\nSeems related to the change done in https://github.com/elastic/elasticsearch/pull/18587\n\n2) `testTranslogReplay`\n\nI have not been able to get this to reproduce, though it has failed numerous times on the date of the creation of this issue, for example see: https://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+master+multijob-intake/668/console\n\nThe reproduce line, FWIW:\n\n`gradle :core:test -Dtests.seed=95E0E2968BF16DDB -Dtests.class=org.elasticsearch.index.engine.InternalEngineTests -Dtests.method=\"testTranslogReplay\" -Dtests.security.manager=true -Dtests.locale=da -Dtests.timezone=US/Pacific`\n", "comments": [ { "body": "The `testTranslogReplay` failure was addressed by #18611.\n", "created_at": "2016-05-28T10:48:28Z" } ], "number": 18623, "title": "InternalEngineTests failures" }
{ "body": "Modifying the translog replay to not replay again into the translog\nintroduced a bug for the case of multiple operations for the same\ndoc. Namely, since we were no longer updating the version map for each\noperation, the second operation for a doc would be treated as a creation\ninstead of as an update. This commit fixes this bug by placing these\noperations into version map. This commit includes a failing test case.\n\nRelates #18547\nRelates #18623\n", "number": 18611, "review_comments": [ { "body": "good!!\n", "created_at": "2016-05-27T13:24:59Z" }, { "body": "can you put a comment here that we flush the version map after recovery so null is fine here...\n", "created_at": "2016-05-27T13:25:30Z" }, { "body": "I pushed 41a953b3fb086d402123573c9b93bc07791649cd.\n", "created_at": "2016-05-27T13:42:51Z" } ], "title": "Fix translog replay multiple operations same doc" }
{ "commits": [ { "message": "Fix translog replay multiple operations same doc\n\nModifying the translog replay to not replay again into the translog\nintroduced a bug for the case of multiple operations for the same\ndoc. Namely, since we were no longer updating the version map for each\noperation, the second operation for a doc would be treated as a creation\ninstead of as an update. This commit fixes this bug by placing these\noperations into version map. This commit includes a failing test case." }, { "message": "Add comment on version map during translog replay\n\nThis commit adds a comment clarifying the use of the version map during\na translog replay." } ], "files": [ { "diff": "@@ -394,6 +394,12 @@ private boolean innerIndex(Index index) throws IOException {\n final Translog.Location translogLocation = translog.add(new Translog.Index(index));\n index.setTranslogLocation(translogLocation);\n versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion, index.getTranslogLocation()));\n+ } else {\n+ // we do not replay in to the translog, so there is no\n+ // translog location; that is okay because real-time\n+ // gets are not possible during recovery and we will\n+ // flush when the recovery is complete\n+ versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion, null));\n }\n \n return created;\n@@ -497,6 +503,12 @@ private void innerDelete(Delete delete) throws IOException {\n final Translog.Location translogLocation = translog.add(new Translog.Delete(delete));\n delete.setTranslogLocation(translogLocation);\n versionMap.putUnderLock(delete.uid().bytes(), new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis(), delete.getTranslogLocation()));\n+ } else {\n+ // we do not replay in to the translog, so there is no\n+ // translog location; that is okay because real-time\n+ // gets are not possible during recovery and we will\n+ // flush when the recovery is complete\n+ versionMap.putUnderLock(delete.uid().bytes(), new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis(), null));\n }\n }\n }", "filename": "core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java", "status": "modified" }, { "diff": "@@ -54,9 +54,10 @@ public Translog.Location translogLocation() {\n \n @Override\n public long ramBytesUsed() {\n- return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + Long.BYTES + RamUsageEstimator.NUM_BYTES_OBJECT_REF + translogLocation.ramBytesUsed();\n+ return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + Long.BYTES + RamUsageEstimator.NUM_BYTES_OBJECT_REF +\n+ (translogLocation != null ? 
translogLocation.size : 0);\n }\n- \n+\n @Override\n public Collection<Accountable> getChildResources() {\n return Collections.emptyList();", "filename": "core/src/main/java/org/elasticsearch/index/engine/VersionValue.java", "status": "modified" }, { "diff": "@@ -44,6 +44,7 @@\n import org.apache.lucene.search.MatchAllDocsQuery;\n import org.apache.lucene.search.TermQuery;\n import org.apache.lucene.search.TopDocs;\n+import org.apache.lucene.search.TotalHitCountCollector;\n import org.apache.lucene.store.AlreadyClosedException;\n import org.apache.lucene.store.Directory;\n import org.apache.lucene.store.MockDirectoryWrapper;\n@@ -591,6 +592,42 @@ public void testFlushIsDisabledDuringTranslogRecovery() throws IOException {\n engine.flush();\n }\n \n+ public void testTranslogMultipleOperationsSameDocument() throws IOException {\n+ final int ops = randomIntBetween(1, 32);\n+ Engine initialEngine;\n+ final List<Engine.Operation> operations = new ArrayList<>();\n+ try {\n+ initialEngine = engine;\n+ for (int i = 0; i < ops; i++) {\n+ final ParsedDocument doc = testParsedDocument(\"1\", \"1\", \"test\", null, -1, -1, testDocumentWithTextField(), new BytesArray(\"{}\".getBytes(Charset.defaultCharset())), null);\n+ if (randomBoolean()) {\n+ final Engine.Index operation = new Engine.Index(newUid(\"test#1\"), doc, i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime());\n+ operations.add(operation);\n+ initialEngine.index(operation);\n+ } else {\n+ final Engine.Delete operation = new Engine.Delete(\"test\", \"1\", newUid(\"test#1\"), i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), false);\n+ operations.add(operation);\n+ initialEngine.delete(operation);\n+ }\n+ }\n+ } finally {\n+ IOUtils.close(engine);\n+ }\n+\n+ Engine recoveringEngine = null;\n+ try {\n+ recoveringEngine = new InternalEngine(copy(engine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG));\n+ recoveringEngine.recoverFromTranslog();\n+ try (Engine.Searcher searcher = recoveringEngine.acquireSearcher(\"test\")) {\n+ final TotalHitCountCollector collector = new TotalHitCountCollector();\n+ searcher.searcher().search(new MatchAllDocsQuery(), collector);\n+ assertThat(collector.getTotalHits(), equalTo(operations.get(operations.size() - 1) instanceof Engine.Delete ? 0 : 1));\n+ }\n+ } finally {\n+ IOUtils.close(recoveringEngine);\n+ }\n+ }\n+\n public void testTranslogRecoveryDoesNotReplayIntoTranslog() throws IOException {\n final int docs = randomIntBetween(1, 32);\n Engine initialEngine = null;", "filename": "core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java", "status": "modified" } ] }
{ "body": "<!--\nGitHub is reserved for bug reports and feature requests. The best place\nto ask a general question is at the Elastic Discourse forums at\nhttps://discuss.elastic.co. If you are in fact posting a bug report or\na feature request, please include one and only one of the below blocks\nin your new issue.\n-->\n\n<!--\nIf you are filing a bug report, please remove the below feature\nrequest block and provide responses for all of the below items.\n-->\n\nUsing the offical Elasticsearch Docker image for 2.3.1\n\n**Elasticsearch version**: 2.3.1\n\n**JVM version**: java:8-jre\n\n**OS version**: debian jessie\n## **Description of the problem including expected versus actual behavior**:\n### Actual\n\nUsing DateTimeZone with an inline script results in an error. \n\n```\njava.io.IOException: Resource not found: \"org/joda/time/tz/data/America/New_York\"\n```\n### Expected\n\nThe Correct DateTimeZone instance to be returned.\n## **Steps to reproduce**:\n1. Start elasticsearch 2.3.1.\n2. Pass the startup param `-Des.script.engine.groovy.inline.aggs=true`\n3. Create index and mapping with a `date_time_no_millis` field.\n4. Run a search query like the one in `Appendix: A`\n5. Notice an error like `The datetime zone id 'America/New_York' is not recognised`\n6. Run a Date Histogram query like the one in `Appendix: B`\n7. The same error will occur\n8. Restart the elasticsearch instance\n9. Re-run Appendix A and B, they now both work.\n## Appendix\n### A: Aggregate Query\n\n``` json\n{\n \"size\": 0,\n \"aggs\": {\n \"group_by_hour\": {\n \"terms\": {\n \"script\": \"def opDate = new DateTime(doc['operation_date'].date); opDate.withZone(DateTimeZone.forID('America/New_York')).getHourOfDay()\",\n \"order\": {\n \"_term\": \"asc\"\n }\n }\n }\n }\n}\n```\n### B: Date Histogram Query\n\n``` json\n{\n \"size\": 0,\n \"aggs\": {\n \"group_by_hour\": {\n \"date_histogram\": {\n \"field\": \"date_field\",\n \"interval\": \"hour\",\n \"format\": \"H\",\n \"time_zone\": \"America/New_York\"\n }\n }\n }\n}\n```\n\n<!--\nIf you are filing a feature request, please remove the above bug\nreport block and provide responses for all of the below items.\n-->\n", "comments": [ { "body": "Whether this fails or not depends on the order of requests, eg given this document:\n\n```\nPUT t/t/1\n{\n \"operation_date\": \"2001/10/10\"\n}\n```\n\nIf you run a non-scripting agg which refers to the time zone first:\n\n```\nGET _search\n{\n \"aggs\": {\n \"NAME\": {\n \"date_histogram\": {\n \"field\": \"operation_date\",\n \"interval\": \"hour\",\n \"time_zone\": \"America/New_York\"\n }\n }\n }\n}\n```\n\nthen this request succeeds, and so do the scripting request:\n\n```\nGET _search\n{\n \"size\": 0,\n \"aggs\": {\n \"group_by_hour\": {\n \"terms\": {\n \"script\": \"def opDate = new DateTime(doc['operation_date'].date); opDate.withZone(DateTimeZone.forID('America/New_York')).getHourOfDay()\",\n \"order\": {\n \"_term\": \"asc\"\n }\n }\n }\n }\n}\n```\n\nIf you reverse the order of the searches, then both fail.\n", "created_at": "2016-04-29T08:25:01Z" }, { "body": "If you don't have an aggregation, there is currently no way to get timezones and work with scripts. Any ETA on fixing this? Any workaround that can be defined inside the script?\n\nI tried modifying the `java.policy` file but doesn't look like it works. Isn't this related to #14524? 
Is there any workaround other than performing a fake search operation before running the script?\n", "created_at": "2016-05-13T13:12:58Z" }, { "body": "Modifying the `java.policy` file will not help, the issue is in Joda Time.\n", "created_at": "2016-05-13T13:15:41Z" }, { "body": "Thanks for the quick answer @jasontedor . So there isn't a real workaround? Is it related to JodaOrg/joda-time#327? Until this is not fixed, one cannot do this operation? \n\nIs there something that we can do for this? Looks like the Joda Time issue has been opened for a while already.\n", "created_at": "2016-05-13T13:17:56Z" }, { "body": "I opened JodaOrg/joda-time#375. When we can incorporate a new release of Joda Time that contains this into Elasticsearch we will be able to close this bug out. \n", "created_at": "2016-05-19T02:13:10Z" }, { "body": "@jasontedor is this going to be added in a `2.3.x` release for ES?\n", "created_at": "2016-05-19T11:45:00Z" }, { "body": "> is this going to be added in a 2.3.x release for ES?\n\nI don't know; we have to see if JodaOrg/joda-time#375 is accepted into Joda Time first, and if it is, the timeline under which a bug fix release of Joda Time is made that includes it that we can incorporate into Elasticsearch. \n", "created_at": "2016-05-19T11:47:52Z" } ], "number": 18017, "title": "Issue with inline script using Joda DateTimeZone." }
{ "body": "This commit upgrades joda-time to version 2.9.4 to integrate a bug fix\nthere into Elasticsearch.\n\nCloses #14524, closes #18017\n", "number": 18609, "review_comments": [], "title": "Upgrade joda-time to 2.9.4" }
{ "commits": [ { "message": "Upgrade joda-time to 2.9.4\n\nThis commit upgrades joda-time to version 2.9.4 to integrate a bug fix\nthere into Elasticsearch." } ], "files": [ { "diff": "@@ -63,7 +63,7 @@ dependencies {\n compile 'com.carrotsearch:hppc:0.7.1'\n \n // time handling, remove with java 8 time\n- compile 'joda-time:joda-time:2.8.2'\n+ compile 'joda-time:joda-time:2.9.4'\n // joda 2.0 moved to using volatile fields for datetime\n // When updating to a new version, make sure to update our copy of BaseDateTime\n compile 'org.joda:joda-convert:1.2'", "filename": "core/build.gradle", "status": "modified" }, { "diff": "@@ -0,0 +1 @@\n+1c295b462f16702ebe720bbb08f62e1ba80da41b\n\\ No newline at end of file", "filename": "distribution/licenses/joda-time-2.9.4.jar.sha1", "status": "added" }, { "diff": "@@ -44,12 +44,6 @@ public class GroovySecurityTests extends ESTestCase {\n \n private GroovyScriptEngineService se;\n \n- static {\n- // ensure we load all the timezones in the parent classloader with all permissions\n- // relates to https://github.com/elastic/elasticsearch/issues/14524\n- org.joda.time.DateTimeZone.getDefault();\n- }\n-\n @Override\n public void setUp() throws Exception {\n super.setUp();", "filename": "modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovySecurityTests.java", "status": "modified" } ] }
{ "body": "**Elasticsearch version**: 1.7.5 (Tested against 2.0.2, 2.1.2, 2.2.2, and 2.3.2 as well and this is not an issue in any of those releases)\n\n**JVM version**: 1.8.0_45\n\n**OS version**: OSX 10.11.4\n\n**Problem**:\n\nWhen running a function score query as a must_not bool clause, approximately half of the expected documents are being returned. I've been playing around with replicas, number of shards, optimizing, and refreshing on insert which all have minimal to no impact, so for the sake of testing an demonstration and to eliminate variables, the example focuses on 1 shard with no replicas. I also played around with different score functions and found this same behavior with all functions I tested.\n\n**Steps to reproduce**:\n\n``` shell\n# Create the sample index with a single shard\ncurl -XPUT http://localhost:9200/fnscore-test-index -d '{\n \"settings\": {\n \"index\": {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n }\n }\n}'\n\n# Index some documents. Two should be enough to demonstrate the issue \n# in a single-shard index, but I saw some inconsistencies around how many documents\n# failed to match appropriately in smaller sample sizes, so crank up the sample size\n# until you see the query count mismatches\nfor i in {1..2}; do\n curl -XPUT \"http://localhost:9200/fnscore-test-index/event/$i\" -d \"{\\\"field\\\":\\\"value-$i\\\"}\"\ndone\n\n# Run the must_not function score query, sticking with a simple function\n# score script that SHOULD match every document, therefore making the\n# must_not match zero documents\ncurl -XPOST http://localhost:9200/fnscore-test-index/event/_count -d '{\n \"query\": {\n \"bool\": {\n \"must_not\": [\n {\n \"function_score\": {\n \"functions\": [\n {\n \"script_score\": {\n \"script\": \"-1\",\n \"lang\": \"expression\"\n }\n }\n ],\n \"score_mode\": \"sum\",\n \"boost_mode\": \"replace\",\n \"min_score\": 0\n }\n }\n ]\n }\n }\n}'\n# ...which returns...\n{\n \"count\": 1, # This should be 2 \n \"_shards\": {\n \"total\": 1,\n \"successful\": 1,\n \"failed\": 0\n }\n}\n```\n\nSince the function score query should match zero documents, the must_not should (in theory) return ALL documents, but it's really only returning somewhere around half. Note that small sample sizes have (greater sample sizes )\n\n**Interesting (maybe) notes**:\n- Adding a refresh after each insert changes how many documents fail to match the query criteria in small samples, but with an increased sample size, that difference diminishes as the sample size increases, more/less completely vanishing in the 80-100 document range\n- Using a `not` filter instead of a bool query `must_not` \"fixes\" the problem and returns all expected results (this requires the function score query to be wrapped in a query filter, of course)\n- Adding `must` and `should` clauses to the bool query has no impact on the result\n\nNote that this is only an issue in 1.7.5 and below, which I totally understand makes it way lower priority. I've tested with all of the most recent 2.x point releases and have been unable to reproduce with those releases. I'm putting in this report more because I'm not super familiar w/ bool internals and wanted to make sure this wasn't something systemic that may cause future issues.\n", "comments": [ { "body": "Hi @rusnyder \n\nI've tried out your replication and it seems to work just fine on 1.7.5. 
This is what I did:\n\n```\nPUT /fnscore-test-index\n{\n \"settings\": {\n \"index\": {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n }\n }\n}\n\nPUT /fnscore-test-index/event/1\n{\"field\":\"value-1\"}\nPUT /fnscore-test-index/event/2\n{\"field\":\"value-2\"}\n\nPOST /fnscore-test-index/event/_count\n{\n \"query\": {\n \"bool\": {\n \"must_not\": [\n {\n \"function_score\": {\n \"functions\": [\n {\n \"script_score\": {\n \"script\": \"-1\",\n \"lang\": \"expression\"\n }\n }\n ],\n \"score_mode\": \"sum\",\n \"boost_mode\": \"replace\",\n \"min_score\": 0\n }\n }\n ]\n }\n }\n}\n```\n\n> Adding a refresh after each insert changes how many documents fail to match the query criteria in small samples, but with an increased sample size, that difference diminishes as the sample size increases, more/less completely vanishing in the 80-100 document range\n\nThis makes me think you have changed the refresh interval on this index? I think if you try this on a clean cluster with default settings, you'll find it working just fine.\n", "created_at": "2016-05-13T17:08:35Z" }, { "body": "I'm seeing the same behavior as you (as in, not broken) when executing the same commands as you on a fresh cluster, but am still able to reproduce my issue by replacing the independent index requests with either a for loop in bash OR (newly discovered) with a bulk request:\n\n_NOTE: I'll switch to using sense-like syntax for sharing queries - sorry for the bash!_\n\n``` shell\nDELETE /fnscore-test-index\n\nPUT /fnscore-test-index\n{\n \"settings\": {\n \"index\": {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n }\n }\n}\n\nPOST /fnscore-test-index/_bulk\n{\"index\":{\"_type\":\"event\",\"_id\":\"1\"}}\n{\"field\":\"value-1\"}\n{\"index\":{\"_type\":\"event\",\"_id\":\"2\"}}\n{\"field\":\"value-2\"}\n\nPOST /fnscore-test-index/event/_count\n{\n \"query\": {\n \"bool\": {\n \"must_not\": [\n {\n \"function_score\": {\n \"functions\": [\n {\n \"script_score\": {\n \"script\": \"-1\",\n \"lang\": \"expression\"\n }\n }\n ],\n \"score_mode\": \"sum\",\n \"boost_mode\": \"replace\",\n \"min_score\": 0\n }\n }\n ]\n }\n }\n}\n```\n\nSince both a for loop of individual index requests and a bulk request are able reproduce the issue (for me, anyway - the verdict is out for yourself!), it leads me to believe the lack-of-delay between insert statements is closely tied to the underlying issue. It also makes me feel like it's not a fun one.\n", "created_at": "2016-05-17T15:01:08Z" }, { "body": "And if you do:\n\n```\nPOST fnscore-test-index/_refresh\n```\n\nbefore your count?\n", "created_at": "2016-05-17T18:45:44Z" }, { "body": "Adding a refresh doesn't change the count. 
I then also tried to optimize, just for kicks and giggles, since that fixed similar (yet unrelated) issues in another cluster to no avail:\n\n```\nPOST /fnscore-test-index/_optimize?max_num_segments=1\n```\n\nI added the explain param to the \"failing\" query and even the explain results indicate that the hit that is being returned shouldn't, in fact, be returned since it's not actually a search hit (either that or I'm not reading the explain output correctly, which is possible):\n\n``` javascript\nPOST /fnscore-test-index/event/_search?explain\n{\n \"query\": {\n \"bool\": {\n \"must_not\": [\n {\n \"function_score\": {\n \"functions\": [\n {\n \"script_score\": {\n \"script\": \"-1\",\n \"lang\": \"expression\"\n }\n }\n ],\n \"score_mode\": \"sum\",\n \"boost_mode\": \"replace\",\n \"min_score\": 0\n }\n }\n ]\n }\n }\n}\n\n// Note that value/description indicate that this shouldn't have hit:\n\n{\n \"took\": 1,\n \"timed_out\": false,\n \"_shards\": {\n \"total\": 1,\n \"successful\": 1,\n \"failed\": 0\n },\n \"hits\": {\n \"total\": 1,\n \"max_score\": 1,\n \"hits\": [\n {\n \"_shard\": 0,\n \"_node\": \"nvCXAPZoS3OImOn5X0l72A\",\n \"_index\": \"fnscore-test-index\",\n \"_type\": \"event\",\n \"_id\": \"1\",\n \"_score\": 1,\n \"_source\": {\n \"field\": \"value-1\"\n },\n \"_explanation\": {\n \"value\": 0,\n \"description\": \"Failure to meet condition(s) of required/prohibited clause(s)\",\n \"details\": [\n {\n \"value\": 0,\n \"description\": \"match on prohibited clause (function score (ConstantScore(*:*),function=script[-1], params [null]))\",\n \"details\": [\n {\n \"value\": -1,\n \"description\": \"function score, product of:\",\n \"details\": [\n {\n \"value\": -1,\n \"description\": \"Math.min of\",\n \"details\": [\n {\n \"value\": -1,\n \"description\": \"script score function, computed with script:\\\"-1\",\n \"details\": [\n {\n \"value\": 1,\n \"description\": \"_score: \",\n \"details\": [\n {\n \"value\": 1,\n \"description\": \"ConstantScore(*:*), product of:\",\n \"details\": [\n {\n \"value\": 1,\n \"description\": \"boost\"\n },\n {\n \"value\": 1,\n \"description\": \"queryNorm\"\n }\n ]\n }\n ]\n }\n ]\n },\n {\n \"value\": 3.4028235e+38,\n \"description\": \"maxBoost\"\n }\n ]\n },\n {\n \"value\": 1,\n \"description\": \"queryBoost\"\n }\n ]\n }\n ]\n },\n {\n \"value\": 1,\n \"description\": \"ConstantScore(*:*), product of:\",\n \"details\": [\n {\n \"value\": 1,\n \"description\": \"boost\"\n },\n {\n \"value\": 1,\n \"description\": \"queryNorm\"\n }\n ]\n }\n ]\n }\n }\n ]\n }\n}\n```\n", "created_at": "2016-05-17T20:41:58Z" }, { "body": "> I'm seeing the same behavior as you (as in, not broken) when executing the same commands as you on a fresh cluster, but am still able to reproduce my issue by replacing the independent index requests with either a for loop in bash OR (newly discovered) with a bulk request:\n\nDo you mean that you're able to replicate this on a fresh cluster of 1.7.5? Or only on your existing cluster?\n\nI've run your recreation many times without seeing it fail once.\n", "created_at": "2016-05-18T11:06:26Z" }, { "body": "My apologies - I mean in a fresh cluster. Perhaps it's something machine or JVM specific. 
I'll try reproducing this with a few more combinations of machine + JVM version and get back with more info.\n\nEDIT: To be clear, by \"fresh cluster\" I mean deleting the data directory entirely, then starting Elasticsearch and running the recreation script.\n", "created_at": "2016-05-18T20:09:20Z" }, { "body": "I've gotten a few other devs here to reproduce the issue w/ fresh ES 1.7.5 installs and fresh clusters on some different OS's and Java versions (combinations of Fedora, Ubuntu, and OS X with Java 1.8 updates 45, 60, and 91). I've also created a now-failing test that reproduces the issue (sorry I didn't lead with this!): https://github.com/rusnyder/elasticsearch/commit/554e36c5855ab5a314f1d32d968d57493f1f617a\n\nI can inline a patch for that test, if desired, but otherwise will just spend some time this weekend trying to figure out what's happening.\n", "created_at": "2016-05-20T18:01:28Z" }, { "body": "I think I figured out what the issue is, but I haven't quite worked out a solution yet.\n\nDuring the query phase of the aforementioned queries (`must_not` wrapping a `function_score`), this internally gets scored using Lucene's [`ReqExclScorer`](https://github.com/apache/lucene-solr/blob/branch_4x/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java) wrapping an effective match all as the required scorer (more specifically, a `ConstantScore(*:*)` and a [`CustomBoostFactorScorer`](https://github.com/elastic/elasticsearch/blob/1.7/src/main/java/org/elasticsearch/common/lucene/search/function/CustomBoostFactorScorer.java) as the exclusion scorer. When using the `min_score` parameter of the function score query, the `CustomBoostFactorScorer` delegates advancement through the document set to its inner class, [`MinScoreNextDoc`](https://github.com/elastic/elasticsearch/blob/1.7/src/main/java/org/elasticsearch/common/lucene/search/function/CustomBoostFactorScorer.java#L92)\n\nThe problem is that `MinScoreNextDoc.advance(int)` and `ReqExclScorer.nextDoc` are stepping on each other's toes, because both are advancing (calling `nextDoc()`) on the underlying scorer but`ReqExclScorer` is more/less expecting to be the only thing doing the advancement while marking exclusions.\n\nI'll walk through the code path of concern here (all info on variables was found by and can reproduced by stepping through the test case I linked in the previous comment):\n\n[`org.apache.lucene.search.Weight.DefaultBulkScorer#scoreAll(Collector, Scorer)`](https://github.com/apache/lucene-solr/blob/branch_4x/lucene/core/src/java/org/apache/lucene/search/Weight.java#L190)\n\n``` java\npublic abstract class Weight {\n ...\n static class DefaultBulkScorer extends BulkScorer {\n /* [rusnyder] This field, in our test case, is set to the ReqExclScorer of concern */\n private final Scorer scorer;\n ...\n static void scoreAll(Collector collector, Scorer scorer) throws IOException {\n int doc;\n /* [rusnyder] scorer.nextDoc() -> see next snippet */\n while ((doc = scorer.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {\n collector.collect(doc);\n }\n }\n ...\n }\n}\n```\n\n[`org.apache.lucene.search.ReqExclScorer#nextDoc()`](https://github.com/apache/lucene-solr/blob/releases/lucene-solr/4.10.4/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java#L46)\n\n``` java\nclass ReqExclScorer extends Scorer {\n ...\n public int nextDoc() throws IOException {\n if (reqScorer == null) {\n return doc;\n }\n /* [rusnyder] This is the first of 2 document advancements in the ReqExclScorer */\n doc = reqScorer.nextDoc();\n 
if (doc == NO_MORE_DOCS) {\n reqScorer = null; // exhausted, nothing left\n return doc;\n }\n if (exclDisi == null) {\n return doc;\n }\n /* [rusnyder] toNonExcluded() -> see next snippet */\n return doc = toNonExcluded();\n }\n ...\n}\n```\n\n[`org.apache.lucene.search.ReqExclScorer#toNonExcluded()`](https://github.com/apache/lucene-solr/blob/releases/lucene-solr/4.10.4/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java#L72)\n\n``` java\nclass ReqExclScorer extends Scorer {\n ...\n private int toNonExcluded() throws IOException {\n int exclDoc = exclDisi.docID();\n int reqDoc = reqScorer.docID(); // may be excluded\n do { \n if (reqDoc < exclDoc) {\n return reqDoc; // reqScorer advanced to before exclScorer, ie. not excluded\n } else if (reqDoc > exclDoc) {\n /* [rusnyder] exclDisi is an instance of \n FunctionScoreQuery.FunctionFactorScorer here, but that class \n inherits CustomBoostFactorScorer which is where the \n pertinent logic is */\n /* [rusnyder] exclDisi.advance(reqDoc) -> see next snippet */\n exclDoc = exclDisi.advance(reqDoc);\n if (exclDoc == NO_MORE_DOCS) {\n exclDisi = null; // exhausted, no more exclusions\n return reqDoc;\n }\n if (exclDoc > reqDoc) {\n return reqDoc; // not excluded\n }\n }\n /* [rusnyder]: The function score query from the test should result in\n all documents being added to the exclusion set, whereas this nextDoc()\n is only invoked for sparse exclusion sets (i.e. - our test never hits this) */\n } while ((reqDoc = reqScorer.nextDoc()) != NO_MORE_DOCS);\n reqScorer = null; // exhausted, nothing left\n return NO_MORE_DOCS;\n }\n ...\n}\n```\n\n[`org.elasticsearch.common.lucene.search.function.CustomBoostFactorScorer.MinScoreNextDoc#aadvance(int)`](https://github.com/elastic/elasticsearch/blob/1.7/src/main/java/org/elasticsearch/common/lucene/search/function/CustomBoostFactorScorer.java#L112)\n\n``` java\nabstract class CustomBoostFactorScorer extends Scorer {\n ...\n public class MinScoreNextDoc implements NextDoc {\n ...\n public int advance(int target) throws IOException {\n int doc = scorer.advance(target);\n if (doc == NO_MORE_DOCS) {\n return doc;\n }\n currentScore = innerScore();\n if (currentScore < minScore) {\n /* [rusnyder] This is the advancement that conflicts with\n the advancements made above */\n return scorer.nextDoc();\n }\n return doc;\n }\n }\n ...\n}\n```\n\nThe conflict is in between the call `scorer.nextDoc()` from within the `CustomBoostFactorScorer` (Elasticsearch) and the `reqScorer.nextDoc()` call from the ReqExclScorer (Lucene). I tried increasing the number of documents that I indexed in my test, and what I saw made lots of sense: For a contiguous sequence of documents within a single segment that all match the \"exclusion\" criteria (the code path I outlined is within the context of a single `AtomicReader` on a single segment), every other document from such a sequence is being returned. This makes plenty of sense when looking at the code because for each document that \"hits\" the exclusion set in the code path above, the `nextDoc()` function gets invoked twice which ultimately results in the following document within that sequence to be skipped.\n\nI'm new to this part of the codebase and am still rationalizing where exactly the breakdown is and which part of the code is responsible for sorting this out. 
My instinct leads me to believe that the onus is either on Elasticsearch's `CustomBoostFactorScorer` to stop advancing the underlying scorer or on Lucene's `ReqExclScorer` to better handle the case where the exclusion scorer needs to advance through multiple docs at a time to build the exclusion set.\n\nAnyway, I'll see if I can pull something together that makes sense, and in the meantime, and if there's any useful info you have in the meantime, I'd love to hear it!\n", "created_at": "2016-05-20T23:58:31Z" }, { "body": "Awesome work @rusnyder !!! Thank you for diving into this.\n\n@jpountz please could you take a look?\n", "created_at": "2016-05-23T10:36:25Z" }, { "body": "@rusnyder Thanks for digging! Base on the description of the issue, I think all that is needed to fix the bug is the following?\n\n``` patch\ndiff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/CustomBoostFactorScorer.java b/src/main/java/org/elasticsearch/common/lucene/search/function/CustomBoostFactorScorer.java\nindex bcc785a..ded96e8 100644\n--- a/src/main/java/org/elasticsearch/common/lucene/search/function/CustomBoostFactorScorer.java\n+++ b/src/main/java/org/elasticsearch/common/lucene/search/function/CustomBoostFactorScorer.java\n@@ -116,7 +116,7 @@ abstract class CustomBoostFactorScorer extends Scorer {\n }\n currentScore = innerScore();\n if (currentScore < minScore) {\n- return scorer.nextDoc();\n+ return nextDoc();\n }\n return doc;\n }\n\n```\n", "created_at": "2016-05-23T11:44:35Z" }, { "body": "Wow @jpountz - that was easy. It's like you guys know what you're doing...\n\nThat worked like a charm! I'm happy to put out a PR w/ the test and the fix, but I'll leave that decision up to you!\n\nThanks again!!\n", "created_at": "2016-05-23T14:54:31Z" }, { "body": "Of note, I went back and tested this in 2.0.2, 2.1.2, 2.2.2, and 2.3.3 now that I had a better understand of what was going on and the test case more honed in, and there is a similar issue in 2.0.2, 2.1.2, and 2.2.2, except that all three of those versions consistently return zero documents for the \"must not(function score(match no docs))\" query when they should be returning all of the docs. 2.3.3 returns the right results for the query.\n\nI haven't had much time to dig in on those, but I took a look to see what would happen if I applied the same patch to those versions and this patch does NOT fix the 2.x versions. I'll dig more later, but since the `ReqExclScorer` more/less received an overhaul in Lucene 5.x, it'll take a bit more exploring for me to track down.\n", "created_at": "2016-05-26T20:15:51Z" }, { "body": "Thanks for the PR!\n\n> return zero documents for the \"must not(function score(match no docs))\" query when they should be returning all of the docs\n\nI could not replicate this bug, can you share the query you are running?\n", "created_at": "2016-05-27T13:16:56Z" }, { "body": "Sure thing. 
It's the same as the original steps to reproduce the 1.7 bug, but I added an optimize to guarantee all documents are on the same segment first:\n\n```\nPUT /fnscore-test-index\n{\n \"settings\": {\n \"index\": {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n }\n }\n}\n\nPOST /fnscore-test-index/_bulk\n{\"index\":{\"_type\":\"event\",\"_id\":\"1\"}}\n{\"field\":\"value-1\"}\n{\"index\":{\"_type\":\"event\",\"_id\":\"2\"}}\n{\"field\":\"value-2\"}\n\nPOST /fnscore-test-index/_refresh\nPOST /fnscore-test-index/_optimize?max_num_segments=1\n\nPOST /fnscore-test-index/event/_search?explain\n{\n \"query\": {\n \"bool\": {\n \"must_not\": [\n {\n \"function_score\": {\n \"functions\": [\n {\n \"script_score\": {\n \"script\": \"-1\",\n \"lang\": \"expression\"\n }\n }\n ],\n \"score_mode\": \"sum\",\n \"boost_mode\": \"replace\",\n \"min_score\": 0\n }\n }\n ]\n }\n }\n}\n```\n\nI hope I'm not going crazy, but that sequence is causing the query to return 0 events on fresh installs of the three aforementioned versions for me.\n", "created_at": "2016-05-27T13:29:37Z" }, { "body": "The default query for a function_score is a match_all query, so I think returning no hits in this case is the expected behoviour?\n", "created_at": "2016-05-27T13:45:51Z" }, { "body": "I assumed that was the default behavior was a `match_all` if no function was specified, not if a function was specified that matched no events. As per the [function score docs](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html):\n\n> If no query is given with a function this is equivalent to specifying `\"match_all\": {}`\n\nSince we _are_ specifying a function here that should be matching zero events, I'd think the docs from that `match_all` would all get filtered out by that function. To add to my expected behavior, with the index setup and documents as indexed in the previous comment, running just the function score query without the `must_not` _also_ provides 0 results, which is the behavior that I would expect:\n\n```\nPOST /fnscore-test-index/_search\n{\n \"query\": {\n \"function_score\": {\n \"functions\": [\n {\n \"script_score\": {\n \"script\": \"-1\",\n \"lang\": \"expression\"\n }\n }\n ],\n \"score_mode\": \"sum\",\n \"boost_mode\": \"replace\",\n \"min_score\": 0\n }\n }\n}\n```\n", "created_at": "2016-05-27T15:53:43Z" }, { "body": "@rusnyder Function score takes two kinds of queries: one that defines the matching docs, and then one per function that defines the documents to apply the function to (which is effectively just a filter since its score is never used). The sentence you are quoting is about the latter. Maybe we should reuse the parameter name and rewrite it to `If no _filter_ is given with a function this is equivalent to specifying \"match_all\": {}` to be clearer.\n", "created_at": "2016-05-30T09:48:43Z" } ], "number": 18315, "title": "Function score queries in must_not bool clauses producing incorrect results" }
{ "body": "When using a function score query within a must_not clause, the query was returning the wrong results.\n\nNote that this PR is pointed at 1.7 for two reasons:\n1. ES 2.0 upgraded to Lucene 5, where the `ReqExclScorer` class was rewritten in such a way that this bug doesn't manifest in 2.0, 2.1, or the 2.2 branches (though as I'm investigating, there may be a different bug that excludes all results...)\n2. ES 2.3 removed the `CustomBoostFactorScorer` class\n\nCloses #18315\n", "number": 18604, "review_comments": [], "title": "Updating CustomBoostFactorScorer to fix must_not + function score issue" }
{ "commits": [ { "message": "Updating CustomBoostFactorScorer to fix must_not + function score\nreturning the wrong results\n\nRelates to #18315" } ], "files": [ { "diff": "@@ -116,7 +116,7 @@ public int advance(int target) throws IOException {\n }\n currentScore = innerScore();\n if (currentScore < minScore) {\n- return scorer.nextDoc();\n+ return nextDoc();\n }\n return doc;\n }", "filename": "src/main/java/org/elasticsearch/common/lucene/search/function/CustomBoostFactorScorer.java", "status": "modified" }, { "diff": "@@ -2640,4 +2640,39 @@ public void testIdsQueryWithInvalidValues() throws Exception {\n equalTo(true));\n }\n }\n+\n+ @Test // see #18315\n+ public void testFunctionScoreWithinMustNotQuery() throws Exception {\n+ // Bug only presents when the number of documents exceeds the number of shards,\n+ // so I'm using a single shard to limit the scope of the issue\n+ prepareCreate(\"fnscore-test-index\", 1, ImmutableSettings.builder()\n+ .put(\"index.number_of_replicas\", 0)\n+ .put(\"index.number_of_shards\", 1)\n+ ).get();\n+ ensureYellow(\"fnscore-test-index\");\n+\n+ // Index two documents (doc count must exceed number of shards)\n+ // NOTE: Bug does not present consistently when indexing documents separately,\n+ // so I'm using a bulk request very intentionally\n+ client().prepareBulk()\n+ .add(client().prepareIndex(\"fnscore-test-index\", \"event\", \"1\").setSource(\"{\\\"field\\\":\\\"value-1\\\"}\"))\n+ .add(client().prepareIndex(\"fnscore-test-index\", \"event\", \"2\").setSource(\"{\\\"field\\\":\\\"value-2\\\"}\"))\n+ .add(client().prepareIndex(\"fnscore-test-index\", \"event\", \"3\").setSource(\"{\\\"field\\\":\\\"value-3\\\"}\"))\n+ .add(client().prepareIndex(\"fnscore-test-index\", \"event\", \"4\").setSource(\"{\\\"field\\\":\\\"value-4\\\"}\"))\n+ .add(client().prepareIndex(\"fnscore-test-index\", \"event\", \"5\").setSource(\"{\\\"field\\\":\\\"value-5\\\"}\"))\n+ .setRefresh(true)\n+ .get();\n+\n+ // The function script should never hit any documents, so the inverse\n+ // should always return all documents\n+ SearchResponse resp = client().prepareSearch(\"fnscore-test-index\")\n+ .setQuery(QueryBuilders.boolQuery()\n+ .mustNot(QueryBuilders.functionScoreQuery()\n+ .add(scriptFunction(\"-1\", \"expression\"))\n+ .scoreMode(\"sum\")\n+ .boostMode(\"replace\")\n+ .setMinScore(0)))\n+ .get();\n+ assertHitCount(resp, 5L);\n+ }\n }", "filename": "src/test/java/org/elasticsearch/search/query/SearchQueryTests.java", "status": "modified" } ] }
{ "body": "**Elasticsearch version**:\nmaster/alpha-3\n\n**Description of the problem including expected versus actual behavior**:\nRemovePluginCommand.java error message still [reports](https://github.com/elastic/elasticsearch/blob/c257e2c51f235853c4453a86e10e463813140fc9/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java#L74) `plugin` as the command name, instead of `elasticsearch-plugin`\n\n**Steps to reproduce**:\n\n```\nabonuccelli@w530 /opt/elk/PROD/node1/elasticsearch-5.0.0-alpha3/bin $ ./elasticsearch-plugin remove x-pack\n-> Removing x-pack...\nA tool for managing installed elasticsearch plugins\n\nCommands\n--------\nlist - Lists installed elasticsearch plugins\ninstall - Install a plugin\nremove - Removes a plugin from elasticsearch\n\nNon-option arguments:\ncommand \n\nOption Description \n------ ----------- \n-h, --help show help \n-s, --silent show minimal output\n-v, --verbose show verbose output\nERROR: Plugin x-pack not found. Run 'plugin list' to get list of installed plugins.\n```\n", "comments": [], "number": 18588, "title": "RemovePluginCommand.java reports older plugin command name" }
{ "body": "This pull request is a cleanup of two issues for plugins: \n- the plugin command specified in an error message for the remove\n plugin command is incorrect\n- remove the ability to specify a custom path for plugins\n\nCloses #18588\n", "number": 18594, "review_comments": [ { "body": "Maybe tell users that they can still just use a symlink if they really really want to keep things in a different place?\n", "created_at": "2016-05-26T15:07:50Z" }, { "body": "@jasontedor I think we should document the symlink option still? WDYT\n", "created_at": "2016-09-16T07:34:08Z" } ], "title": "Plugins cleanup" }
{ "commits": [ { "message": "Fix plugin command name in remove plugin command\n\nThis commit fixes the name of the plugin command that is output when a\nuser attempts to remove a plugin that does not exist." }, { "message": "Remove custom plugins path\n\nThis commit removes the ability to specify a custom plugins\npath. Instead, the plugins path will always be a subdirectory called\n\"plugins\" off of the home directory." }, { "message": "Fix when plugins directory is symlink\n\nThis commit fixes an issue with the plugins directory being a symbolic\nlink. Namely, the install plugins command attempts to always create the\nplugins directory just in case it does not exist. The JDK method used\nhere guarantees that the directory is created, and an exception is not\nthrown if the directory could not be created because it already\nexists. The problem is that this JDK method does not respect symlinks so\nits internal existence checks fails, it proceeds to attempt to create\nthe directory, but the directory creation fails because the symlink\nexists. This is documented as being not an issue. We work around this by\nchecking if there is a symlink where we expect the plugins directory to\nbe, and only attempt to create if not. We add a unit test that plugin\ninstallation to a symlinked plugins directory works as expected." } ], "files": [ { "diff": "@@ -244,7 +244,7 @@ static void addFilePermissions(Permissions policy, Environment environment) {\n addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.binFile(), \"read,readlink\");\n addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.libFile(), \"read,readlink\");\n addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.modulesFile(), \"read,readlink\");\n- addPath(policy, Environment.PATH_PLUGINS_SETTING.getKey(), environment.pluginsFile(), \"read,readlink\");\n+ addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.pluginsFile(), \"read,readlink\");\n addPath(policy, Environment.PATH_CONF_SETTING.getKey(), environment.configFile(), \"read,readlink\");\n addPath(policy, Environment.PATH_SCRIPTS_SETTING.getKey(), environment.scriptsFile(), \"read,readlink\");\n // read-write dirs", "filename": "core/src/main/java/org/elasticsearch/bootstrap/Security.java", "status": "modified" }, { "diff": "@@ -332,7 +332,6 @@ public void apply(Settings value, Settings current, Settings previous) {\n Environment.PATH_DATA_SETTING,\n Environment.PATH_HOME_SETTING,\n Environment.PATH_LOGS_SETTING,\n- Environment.PATH_PLUGINS_SETTING,\n Environment.PATH_REPO_SETTING,\n Environment.PATH_SCRIPTS_SETTING,\n Environment.PATH_SHARED_DATA_SETTING,", "filename": "core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java", "status": "modified" }, { "diff": "@@ -53,7 +53,6 @@ public class Environment {\n public static final Setting<List<String>> PATH_DATA_SETTING =\n Setting.listSetting(\"path.data\", Collections.emptyList(), Function.identity(), Property.NodeScope);\n public static final Setting<String> PATH_LOGS_SETTING = Setting.simpleString(\"path.logs\", Property.NodeScope);\n- public static final Setting<String> PATH_PLUGINS_SETTING = Setting.simpleString(\"path.plugins\", Property.NodeScope);\n public static final Setting<List<String>> PATH_REPO_SETTING =\n Setting.listSetting(\"path.repo\", Collections.emptyList(), Function.identity(), Property.NodeScope);\n public static final Setting<String> PATH_SHARED_DATA_SETTING = Setting.simpleString(\"path.shared_data\", Property.NodeScope);\n@@ -128,11 
+127,7 @@ public Environment(Settings settings) {\n scriptsFile = configFile.resolve(\"scripts\");\n }\n \n- if (PATH_PLUGINS_SETTING.exists(settings)) {\n- pluginsFile = PathUtils.get(cleanPath(PATH_PLUGINS_SETTING.get(settings)));\n- } else {\n- pluginsFile = homeFile.resolve(\"plugins\");\n- }\n+ pluginsFile = homeFile.resolve(\"plugins\");\n \n List<String> dataPaths = PATH_DATA_SETTING.get(settings);\n if (dataPaths.isEmpty() == false) {", "filename": "core/src/main/java/org/elasticsearch/env/Environment.java", "status": "modified" }, { "diff": "@@ -304,7 +304,9 @@ private Path unzip(Path zip, Path pluginsDir) throws IOException, UserError {\n \n // be on the safe side: do not rely on that directories are always extracted\n // before their children (although this makes sense, but is it guaranteed?)\n- Files.createDirectories(targetFile.getParent());\n+ if (!Files.isSymbolicLink(targetFile.getParent())) {\n+ Files.createDirectories(targetFile.getParent());\n+ }\n if (entry.isDirectory() == false) {\n try (OutputStream out = Files.newOutputStream(targetFile)) {\n int len;", "filename": "core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java", "status": "modified" }, { "diff": "@@ -71,7 +71,7 @@ void execute(Terminal terminal, String pluginName, Map<String, String> settings)\n \n Path pluginDir = env.pluginsFile().resolve(pluginName);\n if (Files.exists(pluginDir) == false) {\n- throw new UserError(ExitCodes.USAGE, \"Plugin \" + pluginName + \" not found. Run 'plugin list' to get list of installed plugins.\");\n+ throw new UserError(ExitCodes.USAGE, \"plugin \" + pluginName + \" not found; run 'elasticsearch-plugin list' to get list of installed plugins\");\n }\n \n List<Path> pluginPaths = new ArrayList<>();", "filename": "core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java", "status": "modified" }, { "diff": "@@ -220,9 +220,6 @@ static Settings buildClientSettings(String tribeName, Settings globalSettings, S\n if (Environment.PATH_CONF_SETTING.exists(globalSettings)) {\n sb.put(Environment.PATH_CONF_SETTING.getKey(), Environment.PATH_CONF_SETTING.get(globalSettings));\n }\n- if (Environment.PATH_PLUGINS_SETTING.exists(globalSettings)) {\n- sb.put(Environment.PATH_PLUGINS_SETTING.getKey(), Environment.PATH_PLUGINS_SETTING.get(globalSettings));\n- }\n if (Environment.PATH_LOGS_SETTING.exists(globalSettings)) {\n sb.put(Environment.PATH_LOGS_SETTING.getKey(), Environment.PATH_LOGS_SETTING.get(globalSettings));\n }", "filename": "core/src/main/java/org/elasticsearch/tribe/TribeService.java", "status": "modified" }, { "diff": "@@ -43,13 +43,11 @@ public void testEnvironmentSettings() {\n .put(\"node.name\", \"nodename\")\n .put(\"path.home\", \"some/path\")\n .put(\"path.conf\", \"conf/path\")\n- .put(\"path.plugins\", \"plugins/path\")\n .put(\"path.scripts\", \"scripts/path\")\n .put(\"path.logs\", \"logs/path\").build();\n Settings clientSettings = TribeService.buildClientSettings(\"tribe1\", globalSettings, Settings.EMPTY);\n assertEquals(\"some/path\", clientSettings.get(\"path.home\"));\n assertEquals(\"conf/path\", clientSettings.get(\"path.conf\"));\n- assertEquals(\"plugins/path\", clientSettings.get(\"path.plugins\"));\n assertEquals(\"scripts/path\", clientSettings.get(\"path.scripts\"));\n assertEquals(\"logs/path\", clientSettings.get(\"path.logs\"));\n ", "filename": "core/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java", "status": "modified" }, { "diff": "@@ -180,18 +180,7 @@ set ES_JAVA_OPTS=\"-DproxyHost=host_name 
-DproxyPort=port_number\"\n bin/elasticsearch-plugin install analysis-icu\n -----------------------------------\n \n-=== Settings related to plugins\n-\n-[float]\n-=== Custom plugins directory\n-\n-The `plugins` directory can be changed from the default by adding the\n-following to the `elasticsearch.yml` config file:\n-\n-[source,yml]\n----------------------\n-path.plugins: /path/to/custom/plugins/dir\n----------------------\n+=== Plugins directory\n \n The default location of the `plugins` directory depends on which package you install:\n ", "filename": "docs/plugins/plugin-script.asciidoc", "status": "modified" }, { "diff": "@@ -112,3 +112,8 @@ Previously, Java system properties could be passed to the plugin\n command by passing `-D` style arguments directly to the plugin script.\n This is no longer permitted and such system properties must be passed\n via ES_JAVA_OPTS.\n+\n+==== Custom plugins path\n+\n+The ability to specify a custom plugins path via `path.plugins` has\n+been removed.", "filename": "docs/reference/migration/migrate_5_0/plugins.asciidoc", "status": "modified" }, { "diff": "@@ -87,7 +87,6 @@ configuration options are passed down from the tribe node to each node client:\n * `transport.publish_host`\n * `path.home`\n * `path.conf`\n-* `path.plugins`\n * `path.logs`\n * `path.scripts`\n * `shield.*`", "filename": "docs/reference/modules/tribe.asciidoc", "status": "modified" }, { "diff": "@@ -174,7 +174,6 @@ locations for a Debian-based system:\n | plugins\n | Plugin files location. Each plugin will be contained in a subdirectory.\n | /usr/share/elasticsearch/plugins\n- | path.plugins\n \n | repo\n | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here.", "filename": "docs/reference/setup/install/deb.asciidoc", "status": "modified" }, { "diff": "@@ -160,7 +160,6 @@ locations for an RPM-based system:\n | plugins\n | Plugin files location. Each plugin will be contained in a subdirectory.\n | /usr/share/elasticsearch/plugins\n- | path.plugins\n \n | repo\n | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here.", "filename": "docs/reference/setup/install/rpm.asciidoc", "status": "modified" }, { "diff": "@@ -228,7 +228,6 @@ directory so that you do not delete important data later on.\n | plugins\n | Plugin files location. Each plugin will be contained in a subdirectory.\n | %ES_HOME%\\plugins\n- | path.plugins\n \n | repo\n | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here.", "filename": "docs/reference/setup/install/windows.asciidoc", "status": "modified" }, { "diff": "@@ -147,7 +147,6 @@ directory so that you do not delete important data later on.\n | plugins\n | Plugin files location. Each plugin will be contained in a subdirectory.\n | $ES_HOME/plugins\n- | path.plugins\n \n | repo\n | Shared file system repository locations. Can hold multiple locations. 
A file system repository can be placed in to any subdirectory of any directory specified here.", "filename": "docs/reference/setup/install/zip-targz.asciidoc", "status": "modified" }, { "diff": "@@ -77,7 +77,6 @@ public void testEnvironmentPaths() throws Exception {\n settingsBuilder.put(Environment.PATH_HOME_SETTING.getKey(), esHome.resolve(\"home\").toString());\n settingsBuilder.put(Environment.PATH_CONF_SETTING.getKey(), esHome.resolve(\"conf\").toString());\n settingsBuilder.put(Environment.PATH_SCRIPTS_SETTING.getKey(), esHome.resolve(\"scripts\").toString());\n- settingsBuilder.put(Environment.PATH_PLUGINS_SETTING.getKey(), esHome.resolve(\"plugins\").toString());\n settingsBuilder.putArray(Environment.PATH_DATA_SETTING.getKey(), esHome.resolve(\"data1\").toString(), esHome.resolve(\"data2\").toString());\n settingsBuilder.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), esHome.resolve(\"custom\").toString());\n settingsBuilder.put(Environment.PATH_LOGS_SETTING.getKey(), esHome.resolve(\"logs\").toString());", "filename": "qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilSecurityTests.java", "status": "modified" }, { "diff": "@@ -73,7 +73,7 @@ static void assertRemoveCleaned(Environment env) throws IOException {\n \n public void testMissing() throws Exception {\n UserError e = expectThrows(UserError.class, () -> removePlugin(\"dne\", home));\n- assertTrue(e.getMessage(), e.getMessage().contains(\"Plugin dne not found\"));\n+ assertTrue(e.getMessage(), e.getMessage().contains(\"plugin dne not found\"));\n assertRemoveCleaned(env);\n }\n ", "filename": "qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java", "status": "modified" }, { "diff": "@@ -113,25 +113,26 @@ fi\n fi\n }\n \n-@test \"[$GROUP] install jvm-example plugin with a custom path.plugins\" {\n+@test \"[$GROUP] install jvm-example plugin with a symlinked plugins path\" {\n # Clean up after the last time this test was run\n rm -rf /tmp/plugins.*\n+ rm -rf /tmp/old_plugins.*\n \n- local oldPlugins=\"$ESPLUGINS\"\n- export ESPLUGINS=$(mktemp -d -t 'plugins.XXXX')\n-\n- # Modify the path.plugins setting in configuration file\n- echo \"path.plugins: $ESPLUGINS\" >> \"$ESCONFIG/elasticsearch.yml\"\n- chown -R elasticsearch:elasticsearch \"$ESPLUGINS\"\n+ rm -rf \"$ESPLUGINS\"\n+ local es_plugins=$(mktemp -d -t 'plugins.XXXX')\n+ chown -R elasticsearch:elasticsearch \"$es_plugins\"\n+ ln -s \"$es_plugins\" \"$ESPLUGINS\"\n \n install_jvm_example\n start_elasticsearch_service\n- # check that configuration was actually picked up\n+ # check that symlinked plugin was actually picked up\n curl -s localhost:9200/_cat/configured_example | sed 's/ *$//' > /tmp/installed\n echo \"foo\" > /tmp/expected\n diff /tmp/installed /tmp/expected\n stop_elasticsearch_service\n remove_jvm_example\n+\n+ unlink \"$ESPLUGINS\"\n }\n \n @test \"[$GROUP] install jvm-example plugin with a custom CONFIG_DIR\" {", "filename": "qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash", "status": "modified" } ] }
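A brief aside on the `InstallPluginCommand` change in the diffs above: as the commit message explains, `Files.createDirectories` does not respect a symbolic link sitting where the plugins directory is expected, so the command only creates the directory when it is not a symlink. Below is a small JDK-only illustration of that guard; the class name, helper name, and temp paths are made up for the example, and creating symlinks may require extra privileges on some platforms.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

/**
 * JDK-only illustration of the symlink guard from the diff above.
 * The class name, helper name, and paths are hypothetical.
 */
public class SymlinkAwareMkdirs {

    static void ensureDirectory(Path dir) throws IOException {
        // Leave an existing symlink alone: as described in the commit message above,
        // Files.createDirectories does not respect symlinks and would try (and fail)
        // to create the directory again.
        if (Files.isSymbolicLink(dir) == false) {
            Files.createDirectories(dir);
        }
    }

    public static void main(String[] args) throws IOException {
        Path tmp = Files.createTempDirectory("plugins-demo");
        Path real = Files.createDirectory(tmp.resolve("real-plugins"));
        Path link = Files.createSymbolicLink(tmp.resolve("plugins"), real); // may need privileges

        ensureDirectory(link);                 // no-op: keeps the symlink intact
        ensureDirectory(tmp.resolve("other")); // plain directory gets created

        System.out.println(Files.isSymbolicLink(link) + " " + Files.isDirectory(tmp.resolve("other")));
    }
}
```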
{ "body": "Since we switched to points to index ip addresses, a couple things are not working anymore:\n- [x] range queries only support inclusive bounds (#17777)\n- [x] range aggregations do not work anymore (#17859)\n- [x] sorting on ip addresses fails since it tries to write binary bytes as an utf8 string when rendering sort values (#17959)\n- [x] sorting and aggregations across old and new indices do not work since the coordinating node gets longs from some shards and binary values from other shards and does not know how to reconcile them (#18593)\n- [x] terms aggregations return binary keys (#18003)\n", "comments": [], "number": 17971, "title": "Not all features work on ip fields" }
{ "body": "The fact that ip fields used a different doc values representation in 2.x causes\nissues when querying 2.x and 5.0 indices in the same request. This changes 2.x\ndoc values on ip fields/2.x to be hidden behind binary doc values that use the\nsame encoding as 5.0. This way the coordinating node will be able to merge shard\nresponses that have different major versions.\n\nOne known issue is that this makes sorting/aggregating slower on ip fields for\nindices that have been generated with elasticsearch 2.x.\n\nRelates to #17971\n", "number": 18593, "review_comments": [ { "body": "By this comment do we mean \"there is nothing to do here\" or \"this should never be called\"? If it's the latter should we throw an `UnsupportedOperationException` instead? If it's the former could we instead change the comment to `// nothing to do`?\n", "created_at": "2016-05-31T08:59:10Z" }, { "body": "I am wondering, given that this duplicates the range binary search from the other implementation, if we should abstract that away somewhere so any fix needed to this logic only needs to be made in one place?\n", "created_at": "2016-05-31T09:10:41Z" }, { "body": "It is the former. I will fix the comment.\n", "created_at": "2016-05-31T09:13:29Z" }, { "body": "I thought about it but could not find a way to do it cleanly without relying on boxed Longs, which would make things slower.\n", "created_at": "2016-05-31T09:16:07Z" }, { "body": "ok, fair enough\n", "created_at": "2016-05-31T09:23:25Z" } ], "title": "Make ip fields backward-compatible at query time." }
{ "commits": [ { "message": "Make ip fields backward-compatible at query time. #18593\n\nThe fact that ip fields used a different doc values representation in 2.x causes\nissues when querying 2.x and 5.0 indices in the same request. This changes 2.x\ndoc values on ip fields/2.x to be hidden behind binary doc values that use the\nsame encoding as 5.0. This way the coordinating node will be able to merge shard\nresponses that have different major versions.\n\nOne known issue is that this makes sorting/aggregating slower on ip fields for\nindices that have been generated with elasticsearch 2.x." } ], "files": [ { "diff": "@@ -35,20 +35,21 @@\n import org.elasticsearch.common.network.Cidrs;\n import org.elasticsearch.common.network.InetAddresses;\n import org.elasticsearch.common.settings.Settings;\n-import org.elasticsearch.common.unit.Fuzziness;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentParser;\n+import org.elasticsearch.index.IndexSettings;\n import org.elasticsearch.index.fielddata.IndexFieldData;\n-import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType;\n-import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;\n+import org.elasticsearch.index.fielddata.IndexFieldDataCache;\n import org.elasticsearch.index.mapper.MappedFieldType;\n import org.elasticsearch.index.mapper.Mapper;\n import org.elasticsearch.index.mapper.MapperParsingException;\n+import org.elasticsearch.index.mapper.MapperService;\n import org.elasticsearch.index.mapper.ParseContext;\n import org.elasticsearch.index.mapper.core.LegacyLongFieldMapper;\n import org.elasticsearch.index.mapper.core.LegacyLongFieldMapper.CustomLongNumericField;\n import org.elasticsearch.index.mapper.core.LegacyNumberFieldMapper;\n import org.elasticsearch.index.query.QueryShardContext;\n+import org.elasticsearch.indices.breaker.CircuitBreakerService;\n import org.elasticsearch.search.DocValueFormat;\n import org.joda.time.DateTimeZone;\n \n@@ -249,7 +250,14 @@ public FieldStats stats(IndexReader reader) throws IOException {\n @Override\n public IndexFieldData.Builder fielddataBuilder() {\n failIfNoDocValues();\n- return new DocValuesIndexFieldData.Builder().numericType(NumericType.LONG);\n+ return new IndexFieldData.Builder() {\n+ @Override\n+ public IndexFieldData<?> build(IndexSettings indexSettings,\n+ MappedFieldType fieldType, IndexFieldDataCache cache,\n+ CircuitBreakerService breakerService, MapperService mapperService) {\n+ return new LegacyIpIndexFieldData(indexSettings.getIndex(), name());\n+ }\n+ };\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/index/mapper/ip/LegacyIpFieldMapper.java", "status": "modified" }, { "diff": "@@ -0,0 +1,145 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.elasticsearch.index.mapper.ip;\n+\n+import java.io.IOException;\n+import java.net.InetAddress;\n+import java.net.UnknownHostException;\n+import java.nio.ByteBuffer;\n+\n+import org.apache.lucene.document.InetAddressPoint;\n+import org.apache.lucene.index.DocValues;\n+import org.apache.lucene.index.IndexReader;\n+import org.apache.lucene.index.LeafReaderContext;\n+import org.apache.lucene.index.SortedNumericDocValues;\n+import org.apache.lucene.util.BytesRef;\n+import org.elasticsearch.common.logging.ESLogger;\n+import org.elasticsearch.common.logging.Loggers;\n+import org.elasticsearch.index.Index;\n+import org.elasticsearch.index.fielddata.AtomicFieldData;\n+import org.elasticsearch.index.fielddata.IndexFieldData;\n+import org.elasticsearch.index.fielddata.ScriptDocValues;\n+import org.elasticsearch.index.fielddata.SortedBinaryDocValues;\n+import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;\n+import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;\n+import org.elasticsearch.search.MultiValueMode;\n+\n+final class LegacyIpIndexFieldData implements IndexFieldData<AtomicFieldData> {\n+\n+ protected final Index index;\n+ protected final String fieldName;\n+ protected final ESLogger logger;\n+\n+ public LegacyIpIndexFieldData(Index index, String fieldName) {\n+ this.index = index;\n+ this.fieldName = fieldName;\n+ this.logger = Loggers.getLogger(getClass());\n+ }\n+\n+ public final String getFieldName() {\n+ return fieldName;\n+ }\n+\n+ public final void clear() {\n+ // nothing to do\n+ }\n+\n+ public final void clear(IndexReader reader) {\n+ // nothing to do\n+ }\n+\n+ public final Index index() {\n+ return index;\n+ }\n+\n+ @Override\n+ public AtomicFieldData load(LeafReaderContext context) {\n+ return new AtomicFieldData() {\n+ \n+ @Override\n+ public void close() {\n+ // no-op\n+ }\n+ \n+ @Override\n+ public long ramBytesUsed() {\n+ return 0;\n+ }\n+ \n+ @Override\n+ public ScriptDocValues<?> getScriptValues() {\n+ throw new UnsupportedOperationException(\"Cannot run scripts on ip fields\");\n+ }\n+ \n+ @Override\n+ public SortedBinaryDocValues getBytesValues() {\n+ SortedNumericDocValues values;\n+ try {\n+ values = DocValues.getSortedNumeric(context.reader(), fieldName);\n+ } catch (IOException e) {\n+ throw new IllegalStateException(\"Cannot load doc values\", e);\n+ }\n+ return new SortedBinaryDocValues() {\n+\n+ final ByteBuffer scratch = ByteBuffer.allocate(4);\n+\n+ @Override\n+ public BytesRef valueAt(int index) {\n+ // we do not need to reorder ip addresses since both the numeric\n+ // encoding of LegacyIpFieldMapper and the binary encoding of\n+ // IpFieldMapper match the sort order of ip addresses\n+ long ip = values.valueAt(index);\n+ scratch.putInt(0, (int) ip);\n+ InetAddress inet;\n+ try {\n+ inet = InetAddress.getByAddress(scratch.array());\n+ } catch (UnknownHostException e) {\n+ throw new IllegalStateException(\"Cannot happen\", e);\n+ }\n+ byte[] encoded = InetAddressPoint.encode(inet);\n+ return new BytesRef(encoded);\n+ }\n+ \n+ @Override\n+ public void setDocument(int docId) {\n+ values.setDocument(docId);\n+ }\n+ \n+ @Override\n+ public int count() {\n+ return values.count();\n+ }\n+ };\n+ }\n+ };\n+ }\n+\n+ @Override\n+ public AtomicFieldData loadDirect(LeafReaderContext context)\n+ throws Exception {\n+ return load(context);\n+ }\n+\n+ @Override\n+ public 
IndexFieldData.XFieldComparatorSource comparatorSource(\n+ Object missingValue, MultiValueMode sortMode, Nested nested) {\n+ return new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested);\n+ }\n+\n+}", "filename": "core/src/main/java/org/elasticsearch/index/mapper/ip/LegacyIpIndexFieldData.java", "status": "added" }, { "diff": "@@ -30,7 +30,6 @@\n import org.elasticsearch.common.joda.Joda;\n import org.elasticsearch.common.network.InetAddresses;\n import org.elasticsearch.common.network.NetworkAddress;\n-import org.elasticsearch.index.mapper.ip.LegacyIpFieldMapper;\n import org.joda.time.DateTimeZone;\n \n import java.io.IOException;\n@@ -284,12 +283,12 @@ public void writeTo(StreamOutput out) throws IOException {\n \n @Override\n public String format(long value) {\n- return LegacyIpFieldMapper.longToIp(value);\n+ throw new UnsupportedOperationException();\n }\n \n @Override\n public String format(double value) {\n- return format((long) value);\n+ throw new UnsupportedOperationException();\n }\n \n @Override\n@@ -301,13 +300,12 @@ public String format(BytesRef value) {\n \n @Override\n public long parseLong(String value, boolean roundUp, Callable<Long> now) {\n- // TODO: throw exception in 6.0\n- return LegacyIpFieldMapper.ipToLong(value);\n+ throw new UnsupportedOperationException();\n }\n \n @Override\n public double parseDouble(String value, boolean roundUp, Callable<Long> now) {\n- return parseLong(value, roundUp, now);\n+ throw new UnsupportedOperationException();\n }\n \n @Override", "filename": "core/src/main/java/org/elasticsearch/search/DocValueFormat.java", "status": "modified" }, { "diff": "@@ -27,6 +27,7 @@\n import org.apache.lucene.index.LeafReaderContext;\n import org.apache.lucene.index.SortedSetDocValues;\n import org.apache.lucene.util.BytesRef;\n+import org.elasticsearch.index.fielddata.SortedBinaryDocValues;\n import org.elasticsearch.search.DocValueFormat;\n import org.elasticsearch.search.aggregations.Aggregator;\n import org.elasticsearch.search.aggregations.AggregatorFactories;\n@@ -104,8 +105,16 @@ protected void doCollect(LeafBucketCollector sub, int doc, long bucket) throws I\n collectBucket(sub, doc, bucket);\n }\n };\n+ } else {\n+ SortedBinaryDocValues values = valuesSource.bytesValues(ctx);\n+ return new SortedBinaryRangeLeafCollector(values, ranges, sub) {\n+ @Override\n+ protected void doCollect(LeafBucketCollector sub, int doc, long bucket)\n+ throws IOException {\n+ collectBucket(sub, doc, bucket);\n+ }\n+ };\n }\n- throw new IllegalArgumentException(\"binary range aggregation expects a values source that supports ordinals\");\n }\n \n static abstract class SortedSetRangeLeafCollector extends LeafBucketCollectorBase {\n@@ -214,6 +223,99 @@ private int collect(int doc, long ord, long bucket, int lowBound) throws IOExcep\n protected abstract void doCollect(LeafBucketCollector sub, int doc, long bucket) throws IOException;\n }\n \n+ static abstract class SortedBinaryRangeLeafCollector extends LeafBucketCollectorBase {\n+\n+ final Range[] ranges;\n+ final BytesRef[] maxTos;\n+ final SortedBinaryDocValues values;\n+ final LeafBucketCollector sub;\n+\n+ SortedBinaryRangeLeafCollector(SortedBinaryDocValues values,\n+ Range[] ranges, LeafBucketCollector sub) {\n+ super(sub, values);\n+ for (int i = 1; i < ranges.length; ++i) {\n+ if (RANGE_COMPARATOR.compare(ranges[i-1], ranges[i]) > 0) {\n+ throw new IllegalArgumentException(\"Ranges must be sorted\");\n+ }\n+ }\n+ this.values = values;\n+ this.sub = sub;\n+ this.ranges = ranges;\n+ maxTos = new 
BytesRef[ranges.length];\n+ if (ranges.length > 0) {\n+ maxTos[0] = ranges[0].to;\n+ }\n+ for (int i = 1; i < ranges.length; ++i) {\n+ if (compare(ranges[i].to, maxTos[i-1], -1) >= 0) {\n+ maxTos[i] = ranges[i].to;\n+ } else {\n+ maxTos[i] = maxTos[i-1];\n+ }\n+ }\n+ }\n+\n+ @Override\n+ public void collect(int doc, long bucket) throws IOException {\n+ values.setDocument(doc);\n+ final int valuesCount = values.count();\n+ for (int i = 0, lo = 0; i < valuesCount; ++i) {\n+ final BytesRef value = values.valueAt(i);\n+ lo = collect(doc, value, bucket, lo);\n+ }\n+ }\n+\n+ private int collect(int doc, BytesRef value, long bucket, int lowBound) throws IOException {\n+ int lo = lowBound, hi = ranges.length - 1; // all candidates are between these indexes\n+ int mid = (lo + hi) >>> 1;\n+ while (lo <= hi) {\n+ if (compare(value, ranges[mid].from, 1) < 0) {\n+ hi = mid - 1;\n+ } else if (compare(value, maxTos[mid], -1) >= 0) {\n+ lo = mid + 1;\n+ } else {\n+ break;\n+ }\n+ mid = (lo + hi) >>> 1;\n+ }\n+ if (lo > hi) return lo; // no potential candidate\n+\n+ // binary search the lower bound\n+ int startLo = lo, startHi = mid;\n+ while (startLo <= startHi) {\n+ final int startMid = (startLo + startHi) >>> 1;\n+ if (compare(value, maxTos[startMid], -1) >= 0) {\n+ startLo = startMid + 1;\n+ } else {\n+ startHi = startMid - 1;\n+ }\n+ }\n+\n+ // binary search the upper bound\n+ int endLo = mid, endHi = hi;\n+ while (endLo <= endHi) {\n+ final int endMid = (endLo + endHi) >>> 1;\n+ if (compare(value, ranges[endMid].from, 1) < 0) {\n+ endHi = endMid - 1;\n+ } else {\n+ endLo = endMid + 1;\n+ }\n+ }\n+\n+ assert startLo == lowBound || compare(value, maxTos[startLo - 1], -1) >= 0;\n+ assert endHi == ranges.length - 1 || compare(value, ranges[endHi + 1].from, 1) < 0;\n+\n+ for (int i = startLo; i <= endHi; ++i) {\n+ if (compare(value, ranges[i].to, -1) < 0) {\n+ doCollect(sub, doc, bucket * ranges.length + i);\n+ }\n+ }\n+\n+ return endHi + 1;\n+ }\n+\n+ protected abstract void doCollect(LeafBucketCollector sub, int doc, long bucket) throws IOException;\n+ }\n+\n @Override\n public InternalAggregation buildAggregation(long bucket) throws IOException {\n InternalBinaryRange.Bucket[] buckets = new InternalBinaryRange.Bucket[ranges.length];", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java", "status": "modified" }, { "diff": "@@ -0,0 +1,94 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.elasticsearch.bwcompat;\n+\n+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;\n+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;\n+\n+import java.util.Collection;\n+\n+import org.elasticsearch.Version;\n+import org.elasticsearch.action.search.SearchResponse;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n+import org.elasticsearch.plugins.Plugin;\n+import org.elasticsearch.search.aggregations.AggregationBuilders;\n+import org.elasticsearch.search.aggregations.bucket.range.Range;\n+import org.elasticsearch.search.aggregations.bucket.terms.Terms;\n+import org.elasticsearch.search.sort.SortBuilders;\n+import org.elasticsearch.test.ESIntegTestCase;\n+import org.elasticsearch.test.InternalSettingsPlugin;\n+\n+@ESIntegTestCase.SuiteScopeTestCase\n+public class IpFieldBwCompatIT extends ESIntegTestCase {\n+\n+ @Override\n+ protected Collection<Class<? extends Plugin>> nodePlugins() {\n+ return pluginList(InternalSettingsPlugin.class); // uses index.merge.enabled\n+ }\n+\n+ @Override\n+ public void setupSuiteScopeCluster() throws Exception {\n+ assertAcked(prepareCreate(\"old_index\")\n+ .setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_3_3.id)\n+ .addMapping(\"type\", \"ip_field\", \"type=ip\"));\n+ assertAcked(prepareCreate(\"new_index\")\n+ .addMapping(\"type\", \"ip_field\", \"type=ip\"));\n+\n+ indexRandom(true,\n+ client().prepareIndex(\"old_index\", \"type\", \"1\").setSource(\"ip_field\", \"127.0.0.1\"),\n+ client().prepareIndex(\"new_index\", \"type\", \"1\").setSource(\"ip_field\", \"127.0.0.1\"),\n+ client().prepareIndex(\"new_index\", \"type\", \"2\").setSource(\"ip_field\", \"::1\"));\n+ }\n+\n+ public void testSort() {\n+ SearchResponse response = client().prepareSearch(\"old_index\", \"new_index\")\n+ .addSort(SortBuilders.fieldSort(\"ip_field\")).get();\n+ assertNoFailures(response);\n+ assertEquals(3, response.getHits().totalHits());\n+ assertEquals(\"::1\", response.getHits().getAt(0).getSortValues()[0]);\n+ assertEquals(\"127.0.0.1\", response.getHits().getAt(1).getSortValues()[0]);\n+ assertEquals(\"127.0.0.1\", response.getHits().getAt(2).getSortValues()[0]);\n+ }\n+\n+ public void testRangeAgg() {\n+ SearchResponse response = client().prepareSearch(\"old_index\", \"new_index\")\n+ .addAggregation(AggregationBuilders.ipRange(\"ip_range\").field(\"ip_field\")\n+ .addMaskRange(\"127.0.0.1/16\")\n+ .addMaskRange(\"::1/64\")).get();\n+ assertNoFailures(response);\n+ assertEquals(3, response.getHits().totalHits());\n+ Range range = response.getAggregations().get(\"ip_range\");\n+ assertEquals(2, range.getBuckets().size());\n+ assertEquals(\"::1/64\", range.getBuckets().get(0).getKeyAsString());\n+ assertEquals(3, range.getBuckets().get(0).getDocCount());\n+ assertEquals(\"127.0.0.1/16\", range.getBuckets().get(1).getKeyAsString());\n+ assertEquals(2, range.getBuckets().get(1).getDocCount());\n+ }\n+\n+ public void testTermsAgg() {\n+ SearchResponse response = client().prepareSearch(\"old_index\", \"new_index\")\n+ .addAggregation(AggregationBuilders.terms(\"ip_terms\").field(\"ip_field\")).get();\n+ assertNoFailures(response);\n+ assertEquals(3, response.getHits().totalHits());\n+ Terms terms = response.getAggregations().get(\"ip_terms\");\n+ assertEquals(2, terms.getBuckets().size());\n+ assertEquals(2, terms.getBucketByKey(\"127.0.0.1\").getDocCount());\n+ 
assertEquals(1, terms.getBucketByKey(\"::1\").getDocCount());\n+ }\n+}", "filename": "core/src/test/java/org/elasticsearch/bwcompat/IpFieldBwCompatIT.java", "status": "added" }, { "diff": "@@ -26,7 +26,9 @@\n import org.apache.lucene.index.SortedSetDocValues;\n import org.apache.lucene.util.BytesRef;\n import org.apache.lucene.util.TestUtil;\n+import org.elasticsearch.index.fielddata.SortedBinaryDocValues;\n import org.elasticsearch.search.aggregations.LeafBucketCollector;\n+import org.elasticsearch.search.aggregations.bucket.range.BinaryRangeAggregator.SortedBinaryRangeLeafCollector;\n import org.elasticsearch.search.aggregations.bucket.range.BinaryRangeAggregator.SortedSetRangeLeafCollector;\n import org.elasticsearch.test.ESTestCase;\n \n@@ -139,4 +141,101 @@ public void testSortedSetRangeLeafCollectorMultiValued() throws Exception {\n doTestSortedSetRangeLeafCollector(5);\n }\n }\n+\n+ private static class FakeSortedBinaryDocValues extends SortedBinaryDocValues {\n+\n+ private final BytesRef[] terms;\n+ long[] ords;\n+\n+ FakeSortedBinaryDocValues(BytesRef[] terms) {\n+ this.terms = terms;\n+ }\n+\n+ @Override\n+ public void setDocument(int docID) {\n+ // no-op\n+ }\n+\n+ @Override\n+ public int count() {\n+ return ords.length;\n+ }\n+\n+ @Override\n+ public BytesRef valueAt(int index) {\n+ return terms[(int) ords[index]];\n+ }\n+\n+ }\n+\n+ private void doTestSortedBinaryRangeLeafCollector(int maxNumValuesPerDoc) throws Exception {\n+ final Set<BytesRef> termSet = new HashSet<>();\n+ final int numTerms = TestUtil.nextInt(random(), maxNumValuesPerDoc, 100);\n+ while (termSet.size() < numTerms) {\n+ termSet.add(new BytesRef(TestUtil.randomSimpleString(random(), randomInt(2))));\n+ }\n+ final BytesRef[] terms = termSet.toArray(new BytesRef[0]);\n+ Arrays.sort(terms);\n+\n+ final int numRanges = randomIntBetween(1, 10);\n+ BinaryRangeAggregator.Range[] ranges = new BinaryRangeAggregator.Range[numRanges];\n+ for (int i = 0; i < numRanges; ++i) {\n+ ranges[i] = new BinaryRangeAggregator.Range(Integer.toString(i),\n+ randomBoolean() ? null : new BytesRef(TestUtil.randomSimpleString(random(), randomInt(2))),\n+ randomBoolean() ? 
null : new BytesRef(TestUtil.randomSimpleString(random(), randomInt(2))));\n+ }\n+ Arrays.sort(ranges, BinaryRangeAggregator.RANGE_COMPARATOR);\n+\n+ FakeSortedBinaryDocValues values = new FakeSortedBinaryDocValues(terms);\n+ final int[] counts = new int[ranges.length];\n+ SortedBinaryRangeLeafCollector collector = new SortedBinaryRangeLeafCollector(values, ranges, null) {\n+ @Override\n+ protected void doCollect(LeafBucketCollector sub, int doc, long bucket) throws IOException {\n+ counts[(int) bucket]++;\n+ }\n+ };\n+\n+ final int[] expectedCounts = new int[ranges.length];\n+ final int maxDoc = randomIntBetween(5, 10);\n+ for (int doc = 0; doc < maxDoc; ++doc) {\n+ LongHashSet ordinalSet = new LongHashSet();\n+ final int numValues = randomInt(maxNumValuesPerDoc);\n+ while (ordinalSet.size() < numValues) {\n+ ordinalSet.add(random().nextInt(terms.length));\n+ }\n+ final long[] ords = ordinalSet.toArray();\n+ Arrays.sort(ords);\n+ values.ords = ords;\n+\n+ // simulate aggregation\n+ collector.collect(doc);\n+\n+ // now do it the naive way\n+ for (int i = 0; i < ranges.length; ++i) {\n+ for (long ord : ords) {\n+ BytesRef term = terms[(int) ord];\n+ if ((ranges[i].from == null || ranges[i].from.compareTo(term) <= 0)\n+ && (ranges[i].to == null || ranges[i].to.compareTo(term) > 0)) {\n+ expectedCounts[i]++;\n+ break;\n+ }\n+ }\n+ }\n+ }\n+ assertArrayEquals(expectedCounts, counts);\n+ }\n+\n+ public void testSortedBinaryRangeLeafCollectorSingleValued() throws Exception {\n+ final int iters = randomInt(10);\n+ for (int i = 0; i < iters; ++i) {\n+ doTestSortedBinaryRangeLeafCollector(1);\n+ }\n+ }\n+\n+ public void testSortedBinaryRangeLeafCollectorMultiValued() throws Exception {\n+ final int iters = randomInt(10);\n+ for (int i = 0; i < iters; ++i) {\n+ doTestSortedBinaryRangeLeafCollector(5);\n+ }\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java", "status": "modified" } ] }
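The test above cross-checks the collector against a brute-force oracle: a document counts toward a range bucket if any of its values lies in the half-open interval [from, to), with a null bound meaning unbounded. The sketch below restates that check in plain Java; the class and method names are illustrative only and are not part of Elasticsearch.

```java
import java.util.Arrays;

public class NaiveRangeOracle {
    // Half-open range check: null bounds are treated as unbounded, mirroring
    // the "now do it the naive way" loop in the test above.
    static boolean matches(String value, String from, String to) {
        return (from == null || from.compareTo(value) <= 0)
            && (to == null || to.compareTo(value) > 0);
    }

    // A multi-valued document falls into a bucket if any of its values matches.
    static boolean docMatchesRange(String[] docValues, String from, String to) {
        return Arrays.stream(docValues).anyMatch(v -> matches(v, from, to));
    }

    public static void main(String[] args) {
        String[] doc = {"apple", "pear"};
        System.out.println(docMatchesRange(doc, "a", "b"));   // true: "apple" is in [a, b)
        System.out.println(docMatchesRange(doc, null, "a"));  // false: both values are >= "a"
        System.out.println(docMatchesRange(doc, "p", null));  // true: "pear" is in [p, unbounded)
    }
}
```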
{ "body": "I only limit `bulk` and `index` thread pools.\n\nCloses #15582 \n", "comments": [ { "body": "I'm also curious if there is a way to give feedback to users without breaking existing installations. Maybe @s1monw has a clue?\n", "created_at": "2015-12-22T21:54:00Z" }, { "body": "@mikemccand to fix this the right way I agree we should be loud. I think we have to upgrade the settings in the clusterstate first and that can on work well if we do it only in a major version? On the other hand what if you have machines with 8 CPUs and others with 80 CPU? I mean this is really a per-node setting and I think we should maybe not allow to set this globally? I think you patch is good as a first step but we should really think about how to expose these settings?\n", "created_at": "2016-01-05T15:08:58Z" }, { "body": "I talked to @s1monw ... we decided to add a `logger.warn` here if you tried to set to a too-large value, but otherwise be silent for now, and to open a separate issue that this setting should not be global and dynamically updatable.\n", "created_at": "2016-01-05T15:34:26Z" }, { "body": "OK I pushed another commit w/ the feedback; I think it's ready.\n", "created_at": "2016-01-05T15:59:59Z" }, { "body": "LGTM\n", "created_at": "2016-01-08T19:27:36Z" }, { "body": "Thanks @s1monw, I'll port to 2.2.0 as well.\n", "created_at": "2016-01-08T19:33:35Z" } ], "number": 15585, "title": "Limit the max size of bulk and index thread pools to bounded number of processors" }
{ "body": "In #15585 we put hard limits on the maximum thread pool size for bulk and index queue ... but we didn't document the limits.\n", "number": 18590, "review_comments": [], "title": "Document the hard limits from on index and bulk thread pool sizes" }
{ "commits": [ { "message": "Document the hard limits from #15585 on index and bulk thread pool sizes" } ], "files": [ { "diff": "@@ -16,7 +16,8 @@ There are several thread pools, but the important ones include:\n `index`::\n For index/delete operations. Thread pool type is `fixed`\n with a size of `# of available processors`,\n- queue_size of `200`.\n+ queue_size of `200`. The maximum size for this pool\n+ is `# of available processors`.\n \n `search`::\n For count/search/suggest operations. Thread pool type is `fixed`\n@@ -31,7 +32,8 @@ There are several thread pools, but the important ones include:\n `bulk`::\n For bulk operations. Thread pool type is `fixed`\n with a size of `# of available processors`,\n- queue_size of `50`.\n+ queue_size of `50`. The maximum size for this pool\n+ is `# of available processors`.\n \n `percolate`::\n For percolate operations. Thread pool type is `fixed`", "filename": "docs/reference/modules/threadpool.asciidoc", "status": "modified" } ] }
{ "body": "Probably when we updated Azure SDK, we introduced a regression.\nActually, we are not able to remove files anymore.\n\nFor example, if you register a new azure repository, the snapshot service tries to create a temp file and then remove it.\nRemoving does not work and you can see in logs:\n\n```\n[2016-05-18 11:03:24,914][WARN ][org.elasticsearch.cloud.azure.blobstore] [azure] can not remove [tests-ilmRPJ8URU-sh18yj38O6g/] in container {elasticsearch-snapshots}: The specified blob does not exist.\n```\n\nThis fix deals with that. It now list all the files in a flatten mode, remove in the full URL the server and the container name.\n\nAs an example, when you are removing a blob which full name is `https://dpi24329.blob.core.windows.net/elasticsearch-snapshots/bar/test` you need to actually call Azure SDK with `bar/test` as the path, `elasticsearch-snapshots` is the container.\n\nTo run the test, you need to pass some parameters: `-Dtests.thirdparty=true -Dtests.config=/path/to/elasticsearch.yml`\n\nWhere `elasticsearch.yml` contains something like:\n\n```\ncloud.azure.storage.default.account: account\ncloud.azure.storage.default.key: key\n```\n\nRelated to #16472\nCloses #18436.\n\nThis PR also fixes a missing default value for setting `repositories.azure.container` and adds more logs.\n", "comments": [ { "body": "@imotov Could you give a look at this?\n", "created_at": "2016-05-18T15:28:02Z" }, { "body": "Left a few minor comments. Otherwise LGTM.\n", "created_at": "2016-05-20T12:15:02Z" }, { "body": "@imotov I pushed some other changes. Do you mind giving a final review for it?\n\nThanks! \n", "created_at": "2016-05-20T18:10:55Z" }, { "body": "@clintongormley I believe I should backport this to 2.x branch right?\nAnd probably to 1.7 as well as I believe latest 1.7 versions are also broken.\n", "created_at": "2016-05-20T18:11:56Z" }, { "body": "> @clintongormley I believe I should backport this to 2.x branch right? And probably to 1.7 as well as I believe latest 1.7 versions are also broken.\n\nYes please\n", "created_at": "2016-05-23T10:18:23Z" }, { "body": "Left a minor comment about a comment. I still think `blob_container` doesn't match our naming convention and should be renamed to `blobContainer`. \n", "created_at": "2016-05-23T17:11:29Z" }, { "body": "> Left a minor comment about a comment. I still think blob_container doesn't match our naming convention and should be renamed to blobContainer.\n\nI do agree. I just missed the comment! Sorry!\n", "created_at": "2016-05-25T08:50:31Z" }, { "body": "For the record:\n- 2.x: https://github.com/elastic/elasticsearch/issues/18571\n- 1.7: https://github.com/elastic/elasticsearch-cloud-azure/issues/117\n", "created_at": "2016-05-25T16:18:06Z" } ], "number": 18451, "title": "Fix azure files removal" }
{ "body": "Probably when we updated Azure SDK, we introduced a regression.\nActually, we are not able to remove files anymore.\n\nFor example, if you register a new azure repository, the snapshot service tries to create a temp file and then remove it.\nRemoving does not work and you can see in logs:\n\n```\n[2016-05-18 11:03:24,914][WARN ][org.elasticsearch.cloud.azure.blobstore] [azure] can not remove [tests-ilmRPJ8URU-sh18yj38O6g/] in container {elasticsearch-snapshots}: The specified blob does not exist.\n```\n\nThis fix deals with that. It now list all the files in a flatten mode, remove in the full URL the server and the container name.\n\nAs an example, when you are removing a blob which full name is `https://dpi24329.blob.core.windows.net/elasticsearch-snapshots/bar/test` you need to actually call Azure SDK with `bar/test` as the path, `elasticsearch-snapshots` is the container.\n\nRelated to #16472.\nRelated to #18436.\n\nBackport of #18451 in 2.x branch\n\nTo test it, I ran some manual tests:\n\nOn my laptop, create a file `/path/to/azure/config/elasticsearch.yml`:\n\n``` yml\ncloud.azure.storage.default.account: ACCOUNT\ncloud.azure.storage.default.key: KEY\n```\n\nRun `AzureRepositoryF#main()` with `-Des.cluster.routing.allocation.disk.threshold_enabled=false -Des.path.home=/path/to/azure/` options.\n\nThen run:\n\n``` sh\ncurl -XDELETE localhost:9200/foo?pretty\ncurl -XDELETE localhost:9200/_snapshot/my_backup1?pretty\ncurl -XPUT localhost:9200/foo/bar/1?pretty -d '{\n \"foo\": \"bar\"\n}'\ncurl -XPOST localhost:9200/foo/_refresh?pretty\ncurl -XGET localhost:9200/foo/_count?pretty\ncurl -XPUT localhost:9200/_snapshot/my_backup1?pretty -d '{\n \"type\": \"azure\"\n}'\n\ncurl -XPOST \"localhost:9200/_snapshot/my_backup1/snap1?pretty&wait_for_completion=true\"\ncurl -XDELETE localhost:9200/foo?pretty\ncurl -XPOST \"localhost:9200/_snapshot/my_backup1/snap1/_restore?pretty&wait_for_completion=true\"\ncurl -XGET localhost:9200/foo/_count?pretty\n```\n\nThen check files we have on azure platform using the console.\nThen run:\n\n``` sh\ncurl -XDELETE localhost:9200/_snapshot/my_backup1/snap1?pretty\n```\n\nThen check files we have on azure platform using the console and verify that everything has been cleaned.\n", "number": 18571, "review_comments": [], "title": "Fix remove of azure files" }
{ "commits": [ { "message": "Fix remove of azure files\n\nProbably when we updated Azure SDK, we introduced a regression.\nActually, we are not able to remove files anymore.\n\nFor example, if you register a new azure repository, the snapshot service tries to create a temp file and then remove it.\nRemoving does not work and you can see in logs:\n\n```\n[2016-05-18 11:03:24,914][WARN ][org.elasticsearch.cloud.azure.blobstore] [azure] can not remove [tests-ilmRPJ8URU-sh18yj38O6g/] in container {elasticsearch-snapshots}: The specified blob does not exist.\n```\n\nThis fix deals with that. It now list all the files in a flatten mode, remove in the full URL the server and the container name.\n\nAs an example, when you are removing a blob which full name is `https://dpi24329.blob.core.windows.net/elasticsearch-snapshots/bar/test` you need to actually call Azure SDK with `bar/test` as the path, `elasticsearch-snapshots` is the container.\n\nRelated to #16472.\nRelated to #18436.\n\nBackport of #18451 in 2.x branch\n\nTo test it, I ran some manual tests:\n\nOn my laptop, create a file `/path/to/azure/config/elasticsearch.yml`:\n\n```yml\ncloud.azure.storage.default.account: ACCOUNT\ncloud.azure.storage.default.key: KEY\n```\n\nRun `AzureRepositoryF#main()` with `-Des.cluster.routing.allocation.disk.threshold_enabled=false -Des.path.home=/path/to/azure/` options.\n\nThen run:\n\n```sh\ncurl -XDELETE localhost:9200/foo?pretty\ncurl -XDELETE localhost:9200/_snapshot/my_backup1?pretty\ncurl -XPUT localhost:9200/foo/bar/1?pretty -d '{\n \"foo\": \"bar\"\n}'\ncurl -XPOST localhost:9200/foo/_refresh?pretty\ncurl -XGET localhost:9200/foo/_count?pretty\ncurl -XPUT localhost:9200/_snapshot/my_backup1?pretty -d '{\n \"type\": \"azure\"\n}'\n\ncurl -XPOST \"localhost:9200/_snapshot/my_backup1/snap1?pretty&wait_for_completion=true\"\ncurl -XDELETE localhost:9200/foo?pretty\ncurl -XPOST \"localhost:9200/_snapshot/my_backup1/snap1/_restore?pretty&wait_for_completion=true\"\ncurl -XGET localhost:9200/foo/_count?pretty\n```\n\nThen check files we have on azure platform using the console.\nThen run:\n\n```\ncurl -XDELETE localhost:9200/_snapshot/my_backup1/snap1?pretty\n```\n\nThen check files we have on azure platform using the console and verify that everything has been cleaned." 
} ], "files": [ { "diff": "@@ -60,6 +60,7 @@ public AzureBlobContainer(String repositoryName, BlobPath path, AzureBlobStore b\n \n @Override\n public boolean blobExists(String blobName) {\n+ logger.trace(\"blobExists({})\", blobName);\n try {\n return blobStore.blobExists(blobStore.container(), buildKey(blobName));\n } catch (URISyntaxException | StorageException e) {\n@@ -70,6 +71,7 @@ public boolean blobExists(String blobName) {\n \n @Override\n public InputStream openInput(String blobName) throws IOException {\n+ logger.trace(\"openInput({})\", blobName);\n try {\n return blobStore.getInputStream(blobStore.container(), buildKey(blobName));\n } catch (StorageException e) {\n@@ -84,6 +86,7 @@ public InputStream openInput(String blobName) throws IOException {\n \n @Override\n public OutputStream createOutput(String blobName) throws IOException {\n+ logger.trace(\"createOutput({})\", blobName);\n try {\n return new AzureOutputStream(blobStore.getOutputStream(blobStore.container(), buildKey(blobName)));\n } catch (StorageException e) {\n@@ -100,6 +103,7 @@ public OutputStream createOutput(String blobName) throws IOException {\n \n @Override\n public void deleteBlob(String blobName) throws IOException {\n+ logger.trace(\"deleteBlob({})\", blobName);\n try {\n blobStore.deleteBlob(blobStore.container(), buildKey(blobName));\n } catch (URISyntaxException | StorageException e) {\n@@ -110,6 +114,7 @@ public void deleteBlob(String blobName) throws IOException {\n \n @Override\n public Map<String, BlobMetaData> listBlobsByPrefix(@Nullable String prefix) throws IOException {\n+ logger.trace(\"listBlobsByPrefix({})\", prefix);\n \n try {\n return blobStore.listBlobsByPrefix(blobStore.container(), keyPath, prefix);\n@@ -121,6 +126,7 @@ public Map<String, BlobMetaData> listBlobsByPrefix(@Nullable String prefix) thro\n \n @Override\n public void move(String sourceBlobName, String targetBlobName) throws IOException {\n+ logger.trace(\"move({}, {})\", sourceBlobName, targetBlobName);\n try {\n String source = keyPath + sourceBlobName;\n String target = keyPath + targetBlobName;\n@@ -139,6 +145,7 @@ public void move(String sourceBlobName, String targetBlobName) throws IOExceptio\n \n @Override\n public Map<String, BlobMetaData> listBlobs() throws IOException {\n+ logger.trace(\"listBlobs()\");\n return listBlobsByPrefix(null);\n }\n ", "filename": "plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java", "status": "modified" }, { "diff": "@@ -133,8 +133,8 @@ CloudBlobClient getSelectedClient(String account, LocationMode mode) {\n public boolean doesContainerExist(String account, LocationMode mode, String container) {\n try {\n CloudBlobClient client = this.getSelectedClient(account, mode);\n- CloudBlobContainer blob_container = client.getContainerReference(container);\n- return blob_container.exists();\n+ CloudBlobContainer blobContainer = client.getContainerReference(container);\n+ return blobContainer.exists();\n } catch (Exception e) {\n logger.error(\"can not access container [{}]\", container);\n }\n@@ -144,25 +144,25 @@ public boolean doesContainerExist(String account, LocationMode mode, String cont\n @Override\n public void removeContainer(String account, LocationMode mode, String container) throws URISyntaxException, StorageException {\n CloudBlobClient client = this.getSelectedClient(account, mode);\n- CloudBlobContainer blob_container = client.getContainerReference(container);\n+ CloudBlobContainer blobContainer = 
client.getContainerReference(container);\n // TODO Should we set some timeout and retry options?\n /*\n BlobRequestOptions options = new BlobRequestOptions();\n options.setTimeoutIntervalInMs(1000);\n options.setRetryPolicyFactory(new RetryNoRetry());\n- blob_container.deleteIfExists(options, null);\n+ blobContainer.deleteIfExists(options, null);\n */\n logger.trace(\"removing container [{}]\", container);\n- blob_container.deleteIfExists();\n+ blobContainer.deleteIfExists();\n }\n \n @Override\n public void createContainer(String account, LocationMode mode, String container) throws URISyntaxException, StorageException {\n try {\n CloudBlobClient client = this.getSelectedClient(account, mode);\n- CloudBlobContainer blob_container = client.getContainerReference(container);\n+ CloudBlobContainer blobContainer = client.getContainerReference(container);\n logger.trace(\"creating container [{}]\", container);\n- blob_container.createIfNotExists();\n+ blobContainer.createIfNotExists();\n } catch (IllegalArgumentException e) {\n logger.trace(\"fails creating container [{}]\", container, e.getMessage());\n throw new RepositoryException(container, e.getMessage());\n@@ -175,22 +175,44 @@ public void deleteFiles(String account, LocationMode mode, String container, Str\n \n // Container name must be lower case.\n CloudBlobClient client = this.getSelectedClient(account, mode);\n- CloudBlobContainer blob_container = client.getContainerReference(container);\n- if (blob_container.exists()) {\n- for (ListBlobItem blobItem : blob_container.listBlobs(path)) {\n- logger.trace(\"removing blob [{}]\", blobItem.getUri());\n- deleteBlob(account, mode, container, blobItem.getUri().toString());\n+ CloudBlobContainer blobContainer = client.getContainerReference(container);\n+ if (blobContainer.exists()) {\n+ // We list the blobs using a flat blob listing mode\n+ for (ListBlobItem blobItem : blobContainer.listBlobs(path, true)) {\n+ String blobName = blobNameFromUri(blobItem.getUri());\n+ logger.trace(\"removing blob [{}] full URI was [{}]\", blobName, blobItem.getUri());\n+ deleteBlob(account, mode, container, blobName);\n }\n }\n }\n \n+ /**\n+ * Extract the blob name from a URI like https://myservice.azure.net/container/path/to/myfile\n+ * It should remove the container part (first part of the path) and gives path/to/myfile\n+ * @param uri URI to parse\n+ * @return The blob name relative to the container\n+ */\n+ public static String blobNameFromUri(URI uri) {\n+ String path = uri.getPath();\n+\n+ // We remove the container name from the path\n+ // The 3 magic number cames from the fact if path is /container/path/to/myfile\n+ // First occurrence is empty \"/\"\n+ // Second occurrence is \"container\n+ // Last part contains \"path/to/myfile\" which is what we want to get\n+ String[] splits = path.split(\"/\", 3);\n+\n+ // We return the remaining end of the string\n+ return splits[2];\n+ }\n+\n @Override\n public boolean blobExists(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException {\n // Container name must be lower case.\n CloudBlobClient client = this.getSelectedClient(account, mode);\n- CloudBlobContainer blob_container = client.getContainerReference(container);\n- if (blob_container.exists()) {\n- CloudBlockBlob azureBlob = blob_container.getBlockBlobReference(blob);\n+ CloudBlobContainer blobContainer = client.getContainerReference(container);\n+ if (blobContainer.exists()) {\n+ CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob);\n 
return azureBlob.exists();\n }\n \n@@ -203,10 +225,10 @@ public void deleteBlob(String account, LocationMode mode, String container, Stri\n \n // Container name must be lower case.\n CloudBlobClient client = this.getSelectedClient(account, mode);\n- CloudBlobContainer blob_container = client.getContainerReference(container);\n- if (blob_container.exists()) {\n+ CloudBlobContainer blobContainer = client.getContainerReference(container);\n+ if (blobContainer.exists()) {\n logger.trace(\"container [{}]: blob [{}] found. removing.\", container, blob);\n- CloudBlockBlob azureBlob = blob_container.getBlockBlobReference(blob);\n+ CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob);\n azureBlob.delete();\n }\n }\n@@ -266,10 +288,10 @@ public void moveBlob(String account, LocationMode mode, String container, String\n logger.debug(\"moveBlob container [{}], sourceBlob [{}], targetBlob [{}]\", container, sourceBlob, targetBlob);\n \n CloudBlobClient client = this.getSelectedClient(account, mode);\n- CloudBlobContainer blob_container = client.getContainerReference(container);\n- CloudBlockBlob blobSource = blob_container.getBlockBlobReference(sourceBlob);\n+ CloudBlobContainer blobContainer = client.getContainerReference(container);\n+ CloudBlockBlob blobSource = blobContainer.getBlockBlobReference(sourceBlob);\n if (blobSource.exists()) {\n- CloudBlockBlob blobTarget = blob_container.getBlockBlobReference(targetBlob);\n+ CloudBlockBlob blobTarget = blobContainer.getBlockBlobReference(targetBlob);\n blobTarget.startCopy(blobSource);\n blobSource.delete();\n logger.debug(\"moveBlob container [{}], sourceBlob [{}], targetBlob [{}] -> done\", container, sourceBlob, targetBlob);", "filename": "plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java", "status": "modified" }, { "diff": "@@ -25,7 +25,9 @@\n import org.elasticsearch.test.ESTestCase;\n \n import java.net.URI;\n+import java.net.URISyntaxException;\n \n+import static org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl.blobNameFromUri;\n import static org.hamcrest.Matchers.is;\n import static org.hamcrest.Matchers.nullValue;\n \n@@ -168,4 +170,15 @@ void createClient(AzureStorageSettings azureStorageSettings) {\n new CloudBlobClient(URI.create(\"https://\" + azureStorageSettings.getName())));\n }\n }\n+\n+ public void testBlobNameFromUri() throws URISyntaxException {\n+ String name = blobNameFromUri(new URI(\"https://myservice.azure.net/container/path/to/myfile\"));\n+ assertThat(name, is(\"path/to/myfile\"));\n+ name = blobNameFromUri(new URI(\"http://myservice.azure.net/container/path/to/myfile\"));\n+ assertThat(name, is(\"path/to/myfile\"));\n+ name = blobNameFromUri(new URI(\"http://127.0.0.1/container/path/to/myfile\"));\n+ assertThat(name, is(\"path/to/myfile\"));\n+ name = blobNameFromUri(new URI(\"https://127.0.0.1/container/path/to/myfile\"));\n+ assertThat(name, is(\"path/to/myfile\"));\n+ }\n }", "filename": "plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTest.java", "status": "modified" } ] }
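The core of the fix above is turning a blob's absolute URI back into a container-relative name before calling the Azure SDK, so that `https://.../elasticsearch-snapshots/bar/test` is deleted as `bar/test`. The standalone sketch below mirrors the `blobNameFromUri` helper from the patch; it uses only the JDK, not the Azure SDK.

```java
import java.net.URI;

public class BlobNames {
    // The URI path looks like /container/path/to/myfile. Splitting on "/" with a
    // limit of 3 yields "", "container" and "path/to/myfile"; the last part is
    // the container-relative blob name the SDK expects.
    static String blobNameFromUri(URI uri) {
        String[] splits = uri.getPath().split("/", 3);
        return splits[2];
    }

    public static void main(String[] args) {
        URI uri = URI.create("https://dpi24329.blob.core.windows.net/elasticsearch-snapshots/bar/test");
        System.out.println(blobNameFromUri(uri)); // prints: bar/test
    }
}
```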
{ "body": "for this kind of query\n\n```\nGET /idx/type/5359b8d10cf2f2ff4ec71ed7/_percolate\n{\n \"filter\": {\n \"or\": {\n \"filters\": [\n {}\n ]\n }\n }\n}\n```\n", "comments": [ { "body": "This issue is still reproducible ?\n", "created_at": "2014-12-09T07:11:53Z" }, { "body": "This is still broken in 2.0\n", "created_at": "2015-10-14T16:32:27Z" }, { "body": "@clintongormley I took a look and seems like this is a fairly easy fix. I'm assuming the expected behavior is to ignore the `filter` in this case (all percolators executed). I wanted to add an integration test and noticed the [PercolateRequestBuilder](https://github.com/elastic/elasticsearch/blob/2.x/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java) and [PercolateSourceBuilder](https://github.com/elastic/elasticsearch/blob/2.x/core/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java) had the `filter` option removed in this commit: [Query DSL: Remove filter parsers.](https://github.com/elastic/elasticsearch/commit/a0af88e99630b9d3f0c2cf4997f2e82f1f834d41) However the [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/2.3/search-percolate.html#_percolate_api) indicates both the `query` and `filter` options are still supported. My plan was to add the `filter` option back to those java client classes as a `QueryBuilder`. Let me know if I'm not on the right track here. Thanks.\n", "created_at": "2016-05-21T19:44:14Z" }, { "body": "Hey @qwerty4030 the best way to fix this NPE is to replace line 321 in `PercolatorService.java` (2.x branch) with this:\n\n``` java\nParsedQuery parsedQuery = documentIndexService.queryParserService().parseInnerFilter(parser);\nif (parsedQuery != null) {\n context.percolateQuery(new ConstantScoreQuery(parsedQuery.query()));\n}\n```\n\nFilters are deprecated and therefor adding filter methods to the Java api isn't a good idea.\nThis issue doesn't occur in the master branch (percolator has been rewritten), but it makes sense to fix it in 2.x and 2.3 branches. If you open a PR for this then I'm happy to merge it.\n", "created_at": "2016-05-23T08:38:40Z" }, { "body": "Fixed via #18563\n", "created_at": "2016-05-25T09:57:47Z" } ], "number": 6172, "title": "nullpointerexception at https://github.com/elasticsearch/elasticsearch/blob/v1.1.1/src/main/java/org/elasticsearch/percolator/PercolatorService.java#L301" }
{ "body": "Any \"empty\" (null) percolator filter option resulted in an NPE:\n`GET /index/type/doc/_percolate { \"filter\": { \"or\": { \"filters\": [ {} ] } } }`\n`GET /index/type/_percolate { \"filter\": {} }`\n\nThis PR updates the logic to ignore the filter option if it is `null` (all percolators executed).\nNot an issue on current master (percolator was rewritten).\n\nNot possible to integration test since [PercolateRequestBuilder](https://github.com/elastic/elasticsearch/blob/2.x/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java) and [PercolateSourceBuilder](https://github.com/elastic/elasticsearch/blob/2.x/core/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java) had the `filter` option removed in this commit: [Query DSL: Remove filter parsers.](https://github.com/elastic/elasticsearch/commit/a0af88e99630b9d3f0c2cf4997f2e82f1f834d41) Also these filters are deprecated.\n\nCloses #6172 \n", "number": 18563, "review_comments": [], "title": "Fixed NPE when percolator filter option is \"empty\"." }
{ "commits": [ { "message": "Fixed NPE when percolator filter option is \"empty\" (All percolators will be executed.)" } ], "files": [ { "diff": "@@ -318,8 +318,10 @@ private ParsedDocument parseRequest(IndexService documentIndexService, Percolate\n if (context.percolateQuery() != null) {\n throw new ElasticsearchParseException(\"Either specify query or filter, not both\");\n }\n- Query filter = documentIndexService.queryParserService().parseInnerFilter(parser).query();\n- context.percolateQuery(new ConstantScoreQuery(filter));\n+ ParsedQuery parsedQuery = documentIndexService.queryParserService().parseInnerFilter(parser);\n+ if(parsedQuery != null) {\n+ context.percolateQuery(new ConstantScoreQuery(parsedQuery.query()));\n+ }\n } else if (\"sort\".equals(currentFieldName)) {\n parseSort(parser, context);\n } else if (element != null) {", "filename": "core/src/main/java/org/elasticsearch/percolator/PercolatorService.java", "status": "modified" } ] }
{ "body": "<!--\nGitHub is reserved for bug reports and feature requests. The best place\nto ask a general question is at the Elastic Discourse forums at\nhttps://discuss.elastic.co. If you are in fact posting a bug report or\na feature request, please include one and only one of the below blocks\nin your new issue.\n-->\n\n<!--\nIf you are filing a bug report, please remove the below feature\nrequest block and provide responses for all of the below items.\n-->\n\n**Elasticsearch version**: `2.3.2`\n\n**JVM version**: \n\n```\nopenjdk version \"1.8.0_45-internal\"\nOpenJDK Runtime Environment (build 1.8.0_45-internal-b14)\nOpenJDK 64-Bit Server VM (build 25.45-b02, mixed mode)\n```\n\n**OS version**: `Ubuntu 12.04.5 LTS (GNU/Linux 3.2.0-80-virtual x86_64)`\n\n**Description of the problem including expected versus actual behavior**: date_histogram aggregations fail if interval is set to `0.1s` but work fine if set to `100ms`. I would say the problem comes from the parser and it doesn't have anything to do with date_histograms.\n\n**Steps to reproduce**:\n1. Following query fails\n\n```\ncurl -XGET 'http://localhost:9200/logstash-2016.05.23/_search?pretty' -d '\n{\n \"size\" : 0,\n \"aggs\" : {\n \"0\" : {\n \"aggs\" : {\n \"0\" : {\n \"date_histogram\" : {\n \"field\" : \"@timestamp\",\n \"interval\" : \"0.1s\"\n }\n }\n },\n \"filter\" : {\n \"query\" : {\n \"filtered\" : {\n \"query\" : {\n \"query_string\" : {\n \"query\" : \"*\"\n }\n },\n \"filter\" : {\n \"bool\" : {\n \"must\" : [\n {\n \"range\" : {\n \"@timestamp\" : {\n \"to\" : 1464034121806,\n \"from\" : 1464034119663\n }\n }\n }\n ]\n }\n }\n }\n }\n }\n }\n }\n}\n'\n{\n \"error\" : {\n \"root_cause\" : [ {\n \"type\" : \"illegal_argument_exception\",\n \"reason\" : \"Zero or negative time interval not supported\"\n } ],\n \"type\" : \"search_phase_execution_exception\",\n \"reason\" : \"all shards failed\",\n \"phase\" : \"query\",\n \"grouped\" : true,\n \"failed_shards\" : [ {\n \"shard\" : 0,\n \"index\" : \"logstash-2016.05.23\",\n \"node\" : \"z3wh9XcOQjCIyVjT2H-8qg\",\n \"reason\" : {\n \"type\" : \"illegal_argument_exception\",\n \"reason\" : \"Zero or negative time interval not supported\"\n }\n } ]\n },\n \"status\" : 400\n}\n```\n\nbut it works fine if I change the `interval` field to the equivalent value `100ms` \n\n```\ncurl -XGET 'http://eln02.useb.cartodb.net:9200/logstash-2016.05.23/_search?pretty' -d '\n{\n \"size\" : 0,\n \"aggs\" : {\n \"0\" : {\n \"aggs\" : {\n \"0\" : {\n \"date_histogram\" : {\n \"field\" : \"@timestamp\",\n \"interval\" : \"100ms\"\n }\n }\n },\n \"filter\" : {\n \"query\" : {\n \"filtered\" : {\n \"query\" : {\n \"query_string\" : {\n \"query\" : \"*\"\n }\n },\n \"filter\" : {\n \"bool\" : {\n \"must\" : [\n {\n \"range\" : {\n \"@timestamp\" : {\n \"to\" : 1464034121806,\n \"from\" : 1464034119663\n }\n }\n }\n ]\n }\n }\n }\n }\n }\n }\n }\n}\n'\n{\n \"took\" : 304,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 5,\n \"successful\" : 5,\n \"failed\" : 0\n },\n\n(...)\n```\n\n**Provide logs (if relevant)**:\n\n```\n2016-05-24 11:25:06,144][DEBUG][action.search ] [eln02] All shards failed for phase: [query]\nRemoteTransportException[[eln01][10.0.2.81:9300][indices:data/read/search[phase/query]]]; nested: SearchParseException[failed to parse search source [\n{\n \"size\" : 0,\n \"aggs\" : {\n \"0\" : {\n \"aggs\" : {\n \"0\" : {\n \"date_histogram\" : {\n \"field\" : \"@timestamp\",\n \"interval\" : \"0.1s\"\n }\n }\n },\n \"filter\" : {\n \"query\" : {\n \"filtered\" : {\n 
\"query\" : {\n \"query_string\" : {\n \"query\" : \"*\"\n }\n },\n \"filter\" : {\n \"bool\" : {\n \"must\" : [\n {\n \"range\" : {\n \"@timestamp\" : {\n \"to\" : 1464034121806,\n \"from\" : 1464034119663\n }\n }\n }\n ]\n }\n }\n }\n }\n }\n }\n }\n}\n]]; nested: IllegalArgumentException[Zero or negative time interval not supported];\nCaused by: SearchParseException[failed to parse search source [\n{\n \"size\" : 0,\n \"aggs\" : {\n \"0\" : {\n \"aggs\" : {\n \"0\" : {\n \"date_histogram\" : {\n \"field\" : \"@timestamp\",\n \"interval\" : \"0.1s\"\n }\n }\n },\n \"filter\" : {\n \"query\" : {\n \"filtered\" : {\n \"query\" : {\n \"query_string\" : {\n \"query\" : \"*\"\n }\n },\n \"filter\" : {\n \"bool\" : {\n \"must\" : [\n {\n \"range\" : {\n \"@timestamp\" : {\n \"to\" : 1464034121806,\n \"from\" : 1464034119663\n }\n }\n }\n ]\n }\n }\n }\n }\n }\n }\n }\n}\n]]; nested: IllegalArgumentException[Zero or negative time interval not supported];\n at org.elasticsearch.search.SearchService.parseSource(SearchService.java:855)\n at org.elasticsearch.search.SearchService.createContext(SearchService.java:654)\n at org.elasticsearch.search.SearchService.createAndPutContext(SearchService.java:620)\n at org.elasticsearch.search.SearchService.executeQueryPhase(SearchService.java:371)\n at org.elasticsearch.search.action.SearchServiceTransportAction$SearchQueryTransportHandler.messageReceived(SearchServiceTransportAction.java:368)\n at org.elasticsearch.search.action.SearchServiceTransportAction$SearchQueryTransportHandler.messageReceived(SearchServiceTransportAction.java:365)\n at org.elasticsearch.transport.TransportRequestHandler.messageReceived(TransportRequestHandler.java:33)\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:75)\n at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.doRun(MessageChannelHandler.java:300)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: java.lang.IllegalArgumentException: Zero or negative time interval not supported\n at org.elasticsearch.common.rounding.TimeZoneRounding$Builder.<init>(TimeZoneRounding.java:62)\n at org.elasticsearch.common.rounding.TimeZoneRounding.builder(TimeZoneRounding.java:40)\n at org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramParser.parse(DateHistogramParser.java:184)\n at org.elasticsearch.search.aggregations.AggregatorParsers.parseAggregators(AggregatorParsers.java:198)\n at org.elasticsearch.search.aggregations.AggregatorParsers.parseAggregators(AggregatorParsers.java:176)\n at org.elasticsearch.search.aggregations.AggregatorParsers.parseAggregators(AggregatorParsers.java:103)\n at org.elasticsearch.search.aggregations.AggregationParseElement.parse(AggregationParseElement.java:60)\n at org.elasticsearch.search.SearchService.parseSource(SearchService.java:838)\n```\n", "comments": [ { "body": "As an aside fact, I've tested it against Elasticsearch 1.5.2 and it works fine\n", "created_at": "2016-05-24T11:38:20Z" }, { "body": "It's a bug in the time value parsing that only impacts seconds parsing due to a misplaced parenthesis and thus a premature cast. I'll open a pull request. 
This bug is present in 2.x but not master.\n", "created_at": "2016-05-24T13:52:46Z" }, { "body": "I opened #18548.\n", "created_at": "2016-05-24T13:59:39Z" }, { "body": "Wow, that was fast! Thanks for taking a look, @jasontedor \n", "created_at": "2016-05-24T14:02:52Z" }, { "body": "Thanks for reporting @rporres!\n", "created_at": "2016-05-24T15:21:35Z" }, { "body": "Closed by #18548\n", "created_at": "2016-05-24T15:21:37Z" } ], "number": 18546, "title": "Parser raises an exception if interval is set to \"0.1s\" but works fine with \"100ms\"" }
{ "body": "This commit fixes a misplaced cast when parsing seconds. Namely, the\ninput seconds are parsed as a double and then cast to a long before\nmultiplying by the scale. This can lead to truncation to zero before\nmultiplying by the scale thus leading to \"0.1s\" being parsed as zero\nmilliseconds. Instead, the cast should occur after multiplying by the\nscale so as to not prematurely truncate.\n\nCloses #18546\n", "number": 18548, "review_comments": [], "title": "Fix misplaced cast when parsing seconds" }
{ "commits": [ { "message": "Fix misplaced cast when parsing seconds\n\nThis commit fixes a misplaced cast when parsing seconds. Namely, the\ninput seconds are parsed as a double and then cast to a long before\nmultiplying by the scale. This can lead to truncation to zero before\nmultiplying by the scale thus leading to \"0.1s\" being parsed as zero\nmilliseconds. Instead, the cast should occur after multiplying by the\nscale so as to not prematurely truncate." } ], "files": [ { "diff": "@@ -241,7 +241,7 @@ public static TimeValue parseTimeValue(String sValue, TimeValue defaultValue, St\n if (lowerSValue.endsWith(\"ms\")) {\n millis = (long) (Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 2)));\n } else if (lowerSValue.endsWith(\"s\")) {\n- millis = (long) Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 1)) * 1000;\n+ millis = (long) (Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 1)) * 1000);\n } else if (lowerSValue.endsWith(\"m\")) {\n millis = (long) (Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 1)) * 60 * 1000);\n } else if (lowerSValue.endsWith(\"h\")) {", "filename": "core/src/main/java/org/elasticsearch/common/unit/TimeValue.java", "status": "modified" }, { "diff": "@@ -83,6 +83,9 @@ public void testParseTimeValue() {\n assertEquals(new TimeValue(10, TimeUnit.SECONDS),\n TimeValue.parseTimeValue(\"10S\", null, \"test\"));\n \n+ assertEquals(new TimeValue(100, TimeUnit.MILLISECONDS),\n+ TimeValue.parseTimeValue(\"0.1s\", null, \"test\"));\n+\n assertEquals(new TimeValue(10, TimeUnit.MINUTES),\n TimeValue.parseTimeValue(\"10 m\", null, \"test\"));\n assertEquals(new TimeValue(10, TimeUnit.MINUTES),", "filename": "core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java", "status": "modified" } ] }
{ "body": "<!--\nGitHub is reserved for bug reports and feature requests. The best place\nto ask a general question is at the Elastic Discourse forums at\nhttps://discuss.elastic.co. If you are in fact posting a bug report or\na feature request, please include one and only one of the below blocks\nin your new issue.\n-->\n\n<!--\nIf you are filing a bug report, please remove the below feature\nrequest block and provide responses for all of the below items.\n-->\n\n**Elasticsearch Versions**:\n2.1.x - 2.2.x\n\n**JVM Version**:\nOpenJDK 64-Bit Server VM (build 25.77-b03, mixed mode)\n\n**Description of the problem including expected versus actual behavior**:\n\nIt looks like if you create and delete indices very rapidly you can sometimes end up getting a 404 index_not_found_exception from the `/_cat/indices` API. So basically rather than seeing the indices that still exist, you get an exception because one index was created and deleted between when the endpoint resolves indices and then call indices stats api for those indices.\n\n**Steps to reproduce**:\n\nAttached is a script that should hopefully reproduce the problem, but it can take some time (few minutes depending on the ability of your ES cluster to handle responses :)\n\n[cat-indices-issue.sh.zip](https://github.com/elastic/elasticsearch/files/194945/cat-indices-issue.sh.zip)\n\nThe script is Bash. It will run a number of background processes that simply spin creating and then deleting an index with the REST API. The script waits until the output of `_cat/indices` produces the exception, then exits. **NOTE:** It will clean-up any background processes it creates but won't clean up any test indices that might still exist.\n\nWhen the script dies, it will produce output like:\n\n```\n{\"error\":{\"root_cause\":[{\"type\":\"index_not_found_exception\",\"reason\":\"no such index\",\"resource.type\":\"index_or_alias\",\"resource.id\":\"testindex3\",\"index\":\"testindex3\"}],\"type\":\"index_not_found_exception\",\"reason\":\"no such index\",\"resource.type\":\"index_or_alias\",\"resource.id\":\"testindex3\",\"index\":\"testindex3\"},\"status\":404}\n...output of the script cleaning up after itself...\n```\n", "comments": [ { "body": "@joshuar thank you for logging this! 
(via @faeldt)\n\nTo add, sometimes the `\"type\": \"null_pointer_exception\"` (as an addition to the above; sorry I don't have full JSON output at hand) would show up when querying `/_cat/indices` or `/_cat/shards`, etc.\n\nSince this is a machine parseable output, it would be delightful for it to suppress any error and just not return JSON (or any errors for that matter, unless you add `?e=true` or something).\n", "created_at": "2016-03-30T06:47:36Z" }, { "body": "@kwilczynski if you happen to come across this NPE again, please paste the stack trace from the logs into this issue\n", "created_at": "2016-03-30T19:35:08Z" }, { "body": "@clintongormley I will try to fish something on the Elasticsearch side, as since I fixed this on my side (to retry on error, and I don't log in debug in production, I lost it for now, sadly).\n", "created_at": "2016-03-31T01:34:26Z" }, { "body": "@clintongormley I found the following NPE going through the older logs:\n\n```\nRemoteTransportException[[prd11-c-tky-master-search-catalogpf][10.184.20.209:9300][cluster:monitor/stats[n]]]; nested: NullPointerException;\n:Caused by: java.lang.NullPointerException\n at org.elasticsearch.action.admin.cluster.health.ClusterIndexHealth.<init>(ClusterIndexHealth.java:73)\n at org.elasticsearch.action.admin.cluster.stats.TransportClusterStatsAction.nodeOperation(TransportClusterStatsAction.java:127)\n at org.elasticsearch.action.admin.cluster.stats.TransportClusterStatsAction.nodeOperation(TransportClusterStatsAction.java:58)\n at org.elasticsearch.action.support.nodes.TransportNodesAction$NodeTransportHandler.messageReceived(TransportNodesAction.java:211)\n at org.elasticsearch.action.support.nodes.TransportNodesAction$NodeTransportHandler.messageReceived(TransportNodesAction.java:207)\n at org.elasticsearch.transport.TransportService$4.doRun(TransportService.java:350)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\n```\n", "created_at": "2016-03-31T02:40:33Z" }, { "body": "thanks @kwilczynski \n", "created_at": "2016-03-31T17:36:36Z" }, { "body": "Note I also am receiving similar with `_cat/indices` on ES 2.3.2. 
It happens if I'm running a node with multiple `path.data` entries and remove one of the drives that was on the `path.data`:\n\n```\n{\n \"error\": {\n \"root_cause\": [\n {\n \"type\": \"null_pointer_exception\",\n \"reason\": null\n }\n ],\n \"type\": \"null_pointer_exception\",\n \"reason\": null\n },\n \"status\": 500\n}\n```\n\nWithe following stacktrace from the node:\n\n```\n[2016-05-09 11:40:24,520][WARN ][rest.suppressed ] /_cat/indices Params: {}\njava.lang.NullPointerException\n at org.elasticsearch.rest.action.cat.RestIndicesAction.buildTable(RestIndicesAction.java:331)\n at org.elasticsearch.rest.action.cat.RestIndicesAction.access$100(RestIndicesAction.java:52)\n at org.elasticsearch.rest.action.cat.RestIndicesAction$1$1$1.buildResponse(RestIndicesAction.java:97)\n at org.elasticsearch.rest.action.cat.RestIndicesAction$1$1$1.buildResponse(RestIndicesAction.java:94)\n at org.elasticsearch.rest.action.support.RestResponseListener.processResponse(RestResponseListener.java:43)\n at org.elasticsearch.rest.action.support.RestActionListener.onResponse(RestActionListener.java:49)\n at org.elasticsearch.action.support.TransportAction$1.onResponse(TransportAction.java:89)\n at org.elasticsearch.action.support.TransportAction$1.onResponse(TransportAction.java:85)\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$AsyncAction.onCompletion(TransportBroadcastByNodeAction.java:378)\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$AsyncAction.onNodeResponse(TransportBroadcastByNodeAction.java:347)\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$AsyncAction$1.handleResponse(TransportBroadcastByNodeAction.java:319)\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$AsyncAction$1.handleResponse(TransportBroadcastByNodeAction.java:311)\n at org.elasticsearch.transport.TransportService$DirectResponseChannel.processResponse(TransportService.java:819)\n at org.elasticsearch.transport.TransportService$DirectResponseChannel.sendResponse(TransportService.java:803)\n at org.elasticsearch.transport.TransportService$DirectResponseChannel.sendResponse(TransportService.java:793)\n at org.elasticsearch.transport.DelegatingTransportChannel.sendResponse(DelegatingTransportChannel.java:58)\n at org.elasticsearch.transport.RequestHandlerRegistry$TransportChannelWrapper.sendResponse(RequestHandlerRegistry.java:134)\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:412)\n at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$BroadcastByNodeTransportRequestHandler.messageReceived(TransportBroadcastByNodeAction.java:386)\n at org.elasticsearch.transport.TransportRequestHandler.messageReceived(TransportRequestHandler.java:33)\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:75)\n at org.elasticsearch.transport.TransportService$4.doRun(TransportService.java:376)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\n```\n", "created_at": "2016-05-09T15:46:22Z" } ], "number": 17395, "title": "/_cat/indices: race 
condition/exception thrown when creating/deleting indices rapidly " }
{ "body": "Closed indices are already displayed when no indices are explicitly selected. This commit ensures that closed indices are also shown when wildcard filtering is used (fixes #16419). It also addresses another issue that is caused by the fact that the cat action is based internally on 3 different cluster states (one when we query the cluster state to get all indices, one when we query cluster health, and one when we query indices stats). We currently fail the cat request when the user specifies a concrete index as parameter that does not exist. The implementation works as intended in that regard. It checks this not only for the first cluster state request, but also the subsequent indices stats one. This means that if the index is deleted before the cat action has queried the indices stats, it rightfully fails. In case the user provides wildcards (or no parameter at all), however, we fail the indices stats as we pass the resolved concrete indices to the indices stats request and fail to distinguish whether these indices have been resolved by wildcards or explicitly requested by the user. This means that if an index has been deleted before the indices stats request gets to execute, we fail the overall cat request (see #17395). The fix is to let the indices stats request do the resolving again and not pass the concrete indices.\n\nCloses #16419\nCloses #17395\n", "number": 18545, "review_comments": [ { "body": "can we not read the indices from the cluster state response instead of resolving them manually again? I guess I don't get the order problem...\n", "created_at": "2016-05-24T11:53:23Z" }, { "body": "maybe add a comment to keep track of the fact that if an index is gone after the cluster state request, subsequent requests will fail with index not found.\n", "created_at": "2016-05-24T11:54:41Z" }, { "body": "when you run `/_cat/indices/foo,bar`, the `cat` api currently returns the results in a table where the entries are ordered according to the specified parameters (first `foo`, then `bar`). I did not want to break that behavior with this PR. I just added the comment above to document why we do that.\n", "created_at": "2016-05-24T12:00:13Z" }, { "body": "For the cluster health request that's not true as cluster health requests have hard-coded indices options that negate that effect (`lenientExpandOpen()`). The best we can do is ensure it for the indices stats request (that's what we do).\n", "created_at": "2016-05-24T12:03:47Z" }, { "body": "ok, while using cluster state output elements are ordered in a non deterministic way? I did not know what.\n", "created_at": "2016-05-24T12:05:50Z" }, { "body": "right, thanks for clarifying. I am just looking for a way to not lose the research that you have done after we get this in :) a small comment about indices stats that may fail if the index is deleted meanwhile?\n", "created_at": "2016-05-24T12:09:06Z" }, { "body": "I've pushed a commit with more documentation. Can you have another look please?\n", "created_at": "2016-05-24T12:24:13Z" } ], "title": "Expand wildcards to closed indices in /_cat/indices" }
{ "commits": [ { "message": "Expand wildcards to closed indices in /_cat/indices\n\nCloses #16419\nCloses #17395" }, { "message": "more documentation" } ], "files": [ { "diff": "@@ -77,27 +77,41 @@ public void doRequest(final RestRequest request, final RestChannel channel, fina\n clusterStateRequest.clear().indices(indices).metaData(true);\n clusterStateRequest.local(request.paramAsBoolean(\"local\", clusterStateRequest.local()));\n clusterStateRequest.masterNodeTimeout(request.paramAsTime(\"master_timeout\", clusterStateRequest.masterNodeTimeout()));\n+ final IndicesOptions strictExpandIndicesOptions = IndicesOptions.strictExpand();\n+ clusterStateRequest.indicesOptions(strictExpandIndicesOptions);\n \n client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {\n @Override\n public void processResponse(final ClusterStateResponse clusterStateResponse) {\n- ClusterState state = clusterStateResponse.getState();\n- final IndicesOptions concreteIndicesOptions = IndicesOptions.fromOptions(false, true, true, true);\n- final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, concreteIndicesOptions, indices);\n- final String[] openIndices = indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), indices);\n- ClusterHealthRequest clusterHealthRequest = Requests.clusterHealthRequest(openIndices);\n+ final ClusterState state = clusterStateResponse.getState();\n+ final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, strictExpandIndicesOptions, indices);\n+ // concreteIndices should contain exactly the indices in state.metaData() that were selected by clusterStateRequest using\n+ // IndicesOptions.strictExpand(). We select the indices again here so that they can be displayed in the resulting table\n+ // in the requesting order.\n+ assert concreteIndices.length == state.metaData().getIndices().size();\n+\n+ // Indices that were successfully resolved during the cluster state request might be deleted when the subsequent cluster\n+ // health and indices stats requests execute. We have to distinguish two cases:\n+ // 1) the deleted index was explicitly passed as parameter to the /_cat/indices request. In this case we want the subsequent\n+ // requests to fail.\n+ // 2) the deleted index was resolved as part of a wildcard or _all. In this case, we want the subsequent requests not to\n+ // fail on the deleted index (as we want to ignore wildcards that cannot be resolved).\n+ // This behavior can be ensured by letting the cluster health and indices stats requests re-resolve the index names with the\n+ // same indices options that we used for the initial cluster state request (strictExpand). 
Unfortunately cluster health\n+ // requests hard-code their indices options and the best we can do is apply strictExpand to the indices stats request.\n+ ClusterHealthRequest clusterHealthRequest = Requests.clusterHealthRequest(indices);\n clusterHealthRequest.local(request.paramAsBoolean(\"local\", clusterHealthRequest.local()));\n client.admin().cluster().health(clusterHealthRequest, new RestActionListener<ClusterHealthResponse>(channel) {\n @Override\n public void processResponse(final ClusterHealthResponse clusterHealthResponse) {\n IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest();\n- indicesStatsRequest.indices(concreteIndices);\n- indicesStatsRequest.indicesOptions(concreteIndicesOptions);\n+ indicesStatsRequest.indices(indices);\n+ indicesStatsRequest.indicesOptions(strictExpandIndicesOptions);\n indicesStatsRequest.all();\n client.admin().indices().stats(indicesStatsRequest, new RestResponseListener<IndicesStatsResponse>(channel) {\n @Override\n public RestResponse buildResponse(IndicesStatsResponse indicesStatsResponse) throws Exception {\n- Table tab = buildTable(request, concreteIndices, clusterHealthResponse, indicesStatsResponse, clusterStateResponse.getState().metaData());\n+ Table tab = buildTable(request, concreteIndices, clusterHealthResponse, indicesStatsResponse, state.metaData());\n return RestTable.buildResponse(tab, channel);\n }\n });", "filename": "core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java", "status": "modified" }, { "diff": "@@ -24,7 +24,7 @@\n - match:\n $body: |\n /^(green \\s+\n- (open|close) \\s+\n+ open \\s+\n index1 \\s+\n 1 \\s+\n 0 \\s+\n@@ -49,3 +49,24 @@\n (\\d\\d\\d\\d\\-\\d\\d\\-\\d\\dT\\d\\d:\\d\\d:\\d\\d.\\d\\d\\dZ) \\s*\n )\n $/\n+ - do:\n+ indices.close:\n+ index: index1\n+\n+ - do:\n+ cat.indices:\n+ index: index*\n+\n+ - match:\n+ $body: |\n+ /^( \\s+\n+ close \\s+\n+ index1 \\s+\n+ \\s+\n+ \\s+\n+ \\s+\n+ \\s+\n+ \\s+\n+ \\s*\n+ )\n+ $/", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yaml", "status": "modified" } ] }
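The behavioral point of the fix above is that a wildcard expression should be re-resolved by each downstream request rather than pre-resolved once, so that an index deleted in between is simply dropped instead of failing the whole request. The simulation below captures that idea in plain Java with made-up names; it does not use the Elasticsearch client.

```java
import java.util.*;
import java.util.stream.Collectors;

public class WildcardResolution {
    // Resolve an index expression against the currently live indices: a trailing
    // "*" expands leniently, while a concrete name must exist or the call fails.
    static List<String> resolve(Set<String> live, String expression) {
        if (expression.endsWith("*")) {
            String prefix = expression.substring(0, expression.length() - 1);
            return live.stream().filter(n -> n.startsWith(prefix)).sorted().collect(Collectors.toList());
        }
        if (!live.contains(expression)) {
            throw new NoSuchElementException("no such index [" + expression + "]");
        }
        return Collections.singletonList(expression);
    }

    public static void main(String[] args) {
        Set<String> live = new HashSet<>(Arrays.asList("testindex1", "testindex2", "testindex3"));
        List<String> concrete = resolve(live, "testindex*"); // resolved for the first request
        live.remove("testindex3");                           // index deleted concurrently

        try {                                                // passing stale concrete names fails
            for (String name : concrete) resolve(live, name);
        } catch (NoSuchElementException e) {
            System.out.println("stale concrete names: " + e.getMessage());
        }
        System.out.println("re-resolved: " + resolve(live, "testindex*")); // deleted index is ignored
    }
}
```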
{ "body": "Hi,\n\nWith the `indices` cat API, I can list my indices, either _open_ or _close_.\n\n```\n→ curl http://127.0.0.1:9200/_cat/indices\n close partner_results-20150930\nyellow open partner_requests-20150929 1 1 17 0 230.3kb 230.3kb\nyellow open partner_results-20151102 5 1 4311 0 1.5mb 1.5mb\nyellow open search_requests-20150902 5 1 10 0 51.3kb 51.3kb\n```\n\nIf I filter my results, I no longer see _close_ indices : \n\n```\n→ curl http://127.0.0.1:9200/_cat/indices/\\*_results-\\*\nyellow open partner_results-20151102 5 1 4311 0 1.5mb 1.5mb\n```\n\nI didn't find the documentation about that so I don't know if it a bug or an undocumented behavior.\n\nI thought there might be an argument for `/_cat/indices` to show `all/open/close` indices, but I haven't found anything.\n", "comments": [ { "body": "I would like to start contributing by fixing this one, if this can be assigned to me!\n", "created_at": "2016-02-19T09:48:51Z" }, { "body": "> I would like to start contributing by fixing this one, if this can be assigned to me!\n\nGrumble grumble it looks like you can't assign non-\"elastic members\" to issues. I was wrong! Anyway, consider it claimed.\n", "created_at": "2016-02-19T15:12:02Z" }, { "body": "@nik9000 In such a case, I think we can remove the `adoptme` label. I just updated the issue.\n", "created_at": "2016-02-19T15:26:07Z" }, { "body": "> @nik9000 In such a case, I think we can remove the adoptme label. I just updated the issue.\n\nThanks!\n", "created_at": "2016-02-19T15:28:49Z" } ], "number": 16419, "title": "Indices cat API doesn't list closed indices when filtered" }
{ "body": "Closed indices are already displayed when no indices are explicitly selected. This commit ensures that closed indices are also shown when wildcard filtering is used (fixes #16419). It also addresses another issue that is caused by the fact that the cat action is based internally on 3 different cluster states (one when we query the cluster state to get all indices, one when we query cluster health, and one when we query indices stats). We currently fail the cat request when the user specifies a concrete index as parameter that does not exist. The implementation works as intended in that regard. It checks this not only for the first cluster state request, but also the subsequent indices stats one. This means that if the index is deleted before the cat action has queried the indices stats, it rightfully fails. In case the user provides wildcards (or no parameter at all), however, we fail the indices stats as we pass the resolved concrete indices to the indices stats request and fail to distinguish whether these indices have been resolved by wildcards or explicitly requested by the user. This means that if an index has been deleted before the indices stats request gets to execute, we fail the overall cat request (see #17395). The fix is to let the indices stats request do the resolving again and not pass the concrete indices.\n\nCloses #16419\nCloses #17395\n", "number": 18545, "review_comments": [ { "body": "can we not read the indices from the cluster state response instead of resolving them manually again? I guess I don't get the order problem...\n", "created_at": "2016-05-24T11:53:23Z" }, { "body": "maybe add a comment to keep track of the fact that if an index is gone after the cluster state request, subsequent requests will fail with index not found.\n", "created_at": "2016-05-24T11:54:41Z" }, { "body": "when you run `/_cat/indices/foo,bar`, the `cat` api currently returns the results in a table where the entries are ordered according to the specified parameters (first `foo`, then `bar`). I did not want to break that behavior with this PR. I just added the comment above to document why we do that.\n", "created_at": "2016-05-24T12:00:13Z" }, { "body": "For the cluster health request that's not true as cluster health requests have hard-coded indices options that negate that effect (`lenientExpandOpen()`). The best we can do is ensure it for the indices stats request (that's what we do).\n", "created_at": "2016-05-24T12:03:47Z" }, { "body": "ok, while using cluster state output elements are ordered in a non deterministic way? I did not know what.\n", "created_at": "2016-05-24T12:05:50Z" }, { "body": "right, thanks for clarifying. I am just looking for a way to not lose the research that you have done after we get this in :) a small comment about indices stats that may fail if the index is deleted meanwhile?\n", "created_at": "2016-05-24T12:09:06Z" }, { "body": "I've pushed a commit with more documentation. Can you have another look please?\n", "created_at": "2016-05-24T12:24:13Z" } ], "title": "Expand wildcards to closed indices in /_cat/indices" }
{ "commits": [ { "message": "Expand wildcards to closed indices in /_cat/indices\n\nCloses #16419\nCloses #17395" }, { "message": "more documentation" } ], "files": [ { "diff": "@@ -77,27 +77,41 @@ public void doRequest(final RestRequest request, final RestChannel channel, fina\n clusterStateRequest.clear().indices(indices).metaData(true);\n clusterStateRequest.local(request.paramAsBoolean(\"local\", clusterStateRequest.local()));\n clusterStateRequest.masterNodeTimeout(request.paramAsTime(\"master_timeout\", clusterStateRequest.masterNodeTimeout()));\n+ final IndicesOptions strictExpandIndicesOptions = IndicesOptions.strictExpand();\n+ clusterStateRequest.indicesOptions(strictExpandIndicesOptions);\n \n client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {\n @Override\n public void processResponse(final ClusterStateResponse clusterStateResponse) {\n- ClusterState state = clusterStateResponse.getState();\n- final IndicesOptions concreteIndicesOptions = IndicesOptions.fromOptions(false, true, true, true);\n- final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, concreteIndicesOptions, indices);\n- final String[] openIndices = indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), indices);\n- ClusterHealthRequest clusterHealthRequest = Requests.clusterHealthRequest(openIndices);\n+ final ClusterState state = clusterStateResponse.getState();\n+ final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, strictExpandIndicesOptions, indices);\n+ // concreteIndices should contain exactly the indices in state.metaData() that were selected by clusterStateRequest using\n+ // IndicesOptions.strictExpand(). We select the indices again here so that they can be displayed in the resulting table\n+ // in the requesting order.\n+ assert concreteIndices.length == state.metaData().getIndices().size();\n+\n+ // Indices that were successfully resolved during the cluster state request might be deleted when the subsequent cluster\n+ // health and indices stats requests execute. We have to distinguish two cases:\n+ // 1) the deleted index was explicitly passed as parameter to the /_cat/indices request. In this case we want the subsequent\n+ // requests to fail.\n+ // 2) the deleted index was resolved as part of a wildcard or _all. In this case, we want the subsequent requests not to\n+ // fail on the deleted index (as we want to ignore wildcards that cannot be resolved).\n+ // This behavior can be ensured by letting the cluster health and indices stats requests re-resolve the index names with the\n+ // same indices options that we used for the initial cluster state request (strictExpand). 
Unfortunately cluster health\n+ // requests hard-code their indices options and the best we can do is apply strictExpand to the indices stats request.\n+ ClusterHealthRequest clusterHealthRequest = Requests.clusterHealthRequest(indices);\n clusterHealthRequest.local(request.paramAsBoolean(\"local\", clusterHealthRequest.local()));\n client.admin().cluster().health(clusterHealthRequest, new RestActionListener<ClusterHealthResponse>(channel) {\n @Override\n public void processResponse(final ClusterHealthResponse clusterHealthResponse) {\n IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest();\n- indicesStatsRequest.indices(concreteIndices);\n- indicesStatsRequest.indicesOptions(concreteIndicesOptions);\n+ indicesStatsRequest.indices(indices);\n+ indicesStatsRequest.indicesOptions(strictExpandIndicesOptions);\n indicesStatsRequest.all();\n client.admin().indices().stats(indicesStatsRequest, new RestResponseListener<IndicesStatsResponse>(channel) {\n @Override\n public RestResponse buildResponse(IndicesStatsResponse indicesStatsResponse) throws Exception {\n- Table tab = buildTable(request, concreteIndices, clusterHealthResponse, indicesStatsResponse, clusterStateResponse.getState().metaData());\n+ Table tab = buildTable(request, concreteIndices, clusterHealthResponse, indicesStatsResponse, state.metaData());\n return RestTable.buildResponse(tab, channel);\n }\n });", "filename": "core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java", "status": "modified" }, { "diff": "@@ -24,7 +24,7 @@\n - match:\n $body: |\n /^(green \\s+\n- (open|close) \\s+\n+ open \\s+\n index1 \\s+\n 1 \\s+\n 0 \\s+\n@@ -49,3 +49,24 @@\n (\\d\\d\\d\\d\\-\\d\\d\\-\\d\\dT\\d\\d:\\d\\d:\\d\\d.\\d\\d\\dZ) \\s*\n )\n $/\n+ - do:\n+ indices.close:\n+ index: index1\n+\n+ - do:\n+ cat.indices:\n+ index: index*\n+\n+ - match:\n+ $body: |\n+ /^( \\s+\n+ close \\s+\n+ index1 \\s+\n+ \\s+\n+ \\s+\n+ \\s+\n+ \\s+\n+ \\s+\n+ \\s*\n+ )\n+ $/", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yaml", "status": "modified" } ] }
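A quick illustration of the behaviour the change above targets, assuming a node listening on localhost:9200 and sketched with the `partner_results-*` index names from the original report (actual columns and sizes depend on the cluster): a wildcard-filtered `_cat/indices` call is now expected to list closed indices alongside open ones, while a request for a concrete index that does not exist still fails.

```
curl -XGET 'http://localhost:9200/_cat/indices/partner_results-*'
       close partner_results-20150930
yellow open  partner_results-20151102 5 1 4311 0 1.5mb 1.5mb
```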
{ "body": "Hi guys,\n\nwe have upgraded ElasticSearch from 2.3.0 and reindexed our geolocations so the latitude and longitude are stored separately. We have noticed that some of our visualisation started to fail after we add a filter based on geolocation rectangle. However, map visualisation are working just fine. The problem occurs when we include actual documents. In this case, we get some failed shards (usually 1 out of 5) and error: Invalid shift value (xx) in prefixCoded bytes (is encoded value really a geo point?).\n\nDetails:\nOur geolocation index is based on:\n\n```\n\"dynamic_templates\": [{\n....\n{\n \"ner_geo\": {\n \"mapping\": {\n \"type\": \"geo_point\",\n \"lat_lon\": true\n },\n \"path_match\": \"*.coordinates\"\n }\n }],\n```\n\nThe ok query with the error is as follows. If we change the query size to 0 (map visualizations example), the query completes without problem.\n\n```\n{\n \"size\": 100,\n \"aggs\": {\n \"2\": {\n \"geohash_grid\": {\n \"field\": \"authors.affiliation.coordinates\",\n \"precision\": 2\n }\n }\n },\n \"query\": {\n \"filtered\": {\n \"query\": {\n \"query_string\": {\n \"analyze_wildcard\": true,\n \"query\": \"*\"\n }\n },\n \"filter\": {\n \"bool\": {\n \"must\": [\n {\n \"geo_bounding_box\": {\n \"authors.affiliation.coordinates\": {\n \"top_left\": {\n \"lat\": 61.10078883158897,\n \"lon\": -170.15625\n },\n \"bottom_right\": {\n \"lat\": -64.92354174306496,\n \"lon\": 118.47656249999999\n }\n }\n }\n }\n ],\n \"must_not\": []\n }\n }\n }\n },\n \"highlight\": {\n \"pre_tags\": [\n \"@kibana-highlighted-field@\"\n ],\n \"post_tags\": [\n \"@/kibana-highlighted-field@\"\n ],\n \"fields\": {\n \"*\": {}\n },\n \"require_field_match\": false,\n \"fragment_size\": 2147483647\n }\n}\n```\n\nElasticsearch version**: 2.3.0\nOS version**: Elasticsearch docker image with head plugin, marvel and big desk installed\n\nThank you for your help,\nregards,\nJakub Smid\n", "comments": [ { "body": "@jaksmid could you provide some documents and the stack trace that is produced when you see this exception please?\n", "created_at": "2016-04-06T11:08:13Z" }, { "body": "@jpountz given that this only happens with `size` > 0, I'm wondering if this highlighting trying to highlight the geo field? Perhaps with no documents on a particular shard?\n\n/cc @nknize \n", "created_at": "2016-04-06T11:09:22Z" }, { "body": "I can reproduce something that looks just like this with a lucene test if you apply the patch on https://issues.apache.org/jira/browse/LUCENE-7185\n\nI suspect it may happen with extreme values such as latitude = 90 or longitude = 180 which are used much more in tests with the patch. 
See seed:\n\n```\n [junit4] Suite: org.apache.lucene.spatial.geopoint.search.TestGeoPointQuery\n [junit4] IGNOR/A 0.01s J1 | TestGeoPointQuery.testRandomBig\n [junit4] > Assumption #1: 'nightly' test group is disabled (@Nightly())\n [junit4] IGNOR/A 0.00s J1 | TestGeoPointQuery.testRandomDistanceHuge\n [junit4] > Assumption #1: 'nightly' test group is disabled (@Nightly())\n [junit4] 2> NOTE: reproduce with: ant test -Dtestcase=TestGeoPointQuery -Dtests.method=testAllLonEqual -Dtests.seed=4ABB96AB44F4796E -Dtests.locale=id-ID -Dtests.timezone=Pacific/Fakaofo -Dtests.asserts=true -Dtests.file.encoding=US-ASCII\n [junit4] ERROR 0.35s J1 | TestGeoPointQuery.testAllLonEqual <<<\n [junit4] > Throwable #1: java.lang.IllegalArgumentException: Illegal shift value, must be 32..63; got shift=0\n [junit4] > at __randomizedtesting.SeedInfo.seed([4ABB96AB44F4796E:DBB16756B45E397A]:0)\n [junit4] > at org.apache.lucene.spatial.util.GeoEncodingUtils.geoCodedToPrefixCodedBytes(GeoEncodingUtils.java:109)\n [junit4] > at org.apache.lucene.spatial.util.GeoEncodingUtils.geoCodedToPrefixCoded(GeoEncodingUtils.java:89)\n [junit4] > at org.apache.lucene.spatial.geopoint.search.GeoPointPrefixTermsEnum$Range.fillBytesRef(GeoPointPrefixTermsEnum.java:236)\n [junit4] > at org.apache.lucene.spatial.geopoint.search.GeoPointTermsEnum.nextRange(GeoPointTermsEnum.java:71)\n [junit4] > at org.apache.lucene.spatial.geopoint.search.GeoPointPrefixTermsEnum.nextRange(GeoPointPrefixTermsEnum.java:171)\n [junit4] > at org.apache.lucene.spatial.geopoint.search.GeoPointPrefixTermsEnum.nextSeekTerm(GeoPointPrefixTermsEnum.java:190)\n [junit4] > at org.apache.lucene.index.FilteredTermsEnum.next(FilteredTermsEnum.java:212)\n [junit4] > at org.apache.lucene.spatial.geopoint.search.GeoPointTermQueryConstantScoreWrapper$1.scorer(GeoPointTermQueryConstantScoreWrapper.java:110)\n [junit4] > at org.apache.lucene.search.Weight.bulkScorer(Weight.java:135)\n [junit4] > at org.apache.lucene.search.LRUQueryCache$CachingWrapperWeight.bulkScorer(LRUQueryCache.java:644)\n [junit4] > at org.apache.lucene.search.AssertingWeight.bulkScorer(AssertingWeight.java:68)\n [junit4] > at org.apache.lucene.search.BooleanWeight.optionalBulkScorer(BooleanWeight.java:231)\n [junit4] > at org.apache.lucene.search.BooleanWeight.booleanScorer(BooleanWeight.java:297)\n [junit4] > at org.apache.lucene.search.BooleanWeight.bulkScorer(BooleanWeight.java:364)\n [junit4] > at org.apache.lucene.search.LRUQueryCache$CachingWrapperWeight.bulkScorer(LRUQueryCache.java:644)\n [junit4] > at org.apache.lucene.search.AssertingWeight.bulkScorer(AssertingWeight.java:68)\n [junit4] > at org.apache.lucene.search.AssertingWeight.bulkScorer(AssertingWeight.java:68)\n [junit4] > at org.apache.lucene.search.IndexSearcher.search(IndexSearcher.java:666)\n [junit4] > at org.apache.lucene.search.AssertingIndexSearcher.search(AssertingIndexSearcher.java:91)\n [junit4] > at org.apache.lucene.search.IndexSearcher.search(IndexSearcher.java:473)\n [junit4] > at org.apache.lucene.spatial.util.BaseGeoPointTestCase.verifyRandomRectangles(BaseGeoPointTestCase.java:835)\n [junit4] > at org.apache.lucene.spatial.util.BaseGeoPointTestCase.verify(BaseGeoPointTestCase.java:763)\n [junit4] > at org.apache.lucene.spatial.util.BaseGeoPointTestCase.testAllLonEqual(BaseGeoPointTestCase.java:495)\n\n```\n", "created_at": "2016-04-07T07:17:50Z" }, { "body": "Hi @clintongormley, thank you for your message. 
\n\nThe stack trace is as follows:\n`RemoteTransportException[[elasticsearch_4][172.17.0.2:9300][indices:data/read/search[phase/fetch/id]]]; nested: FetchPhaseExecutionException[Fetch Failed [Failed to highlight field [cyberdyne_metadata.ner.mitie.model.DISEASE.tag]]]; nested: NumberFormatException[Invalid shift value (65) in prefixCoded bytes (is encoded value really a geo point?)];\nCaused by: FetchPhaseExecutionException[Fetch Failed [Failed to highlight field [cyberdyne_metadata.ner.mitie.model.DISEASE.tag]]]; nested: NumberFormatException[Invalid shift value (65) in prefixCoded bytes (is encoded value really a geo point?)];\n at org.elasticsearch.search.highlight.PlainHighlighter.highlight(PlainHighlighter.java:123)\n at org.elasticsearch.search.highlight.HighlightPhase.hitExecute(HighlightPhase.java:126)\n at org.elasticsearch.search.fetch.FetchPhase.execute(FetchPhase.java:188)\n at org.elasticsearch.search.SearchService.executeFetchPhase(SearchService.java:592)\n at org.elasticsearch.search.action.SearchServiceTransportAction$FetchByIdTransportHandler.messageReceived(SearchServiceTransportAction.java:408)\n at org.elasticsearch.search.action.SearchServiceTransportAction$FetchByIdTransportHandler.messageReceived(SearchServiceTransportAction.java:405)\n at org.elasticsearch.transport.TransportRequestHandler.messageReceived(TransportRequestHandler.java:33)\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:75)\n at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.doRun(MessageChannelHandler.java:300)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: java.lang.NumberFormatException: Invalid shift value (65) in prefixCoded bytes (is encoded value really a geo point?)\n at org.apache.lucene.spatial.util.GeoEncodingUtils.getPrefixCodedShift(GeoEncodingUtils.java:134)\n at org.apache.lucene.spatial.geopoint.search.GeoPointPrefixTermsEnum.accept(GeoPointPrefixTermsEnum.java:219)\n at org.apache.lucene.index.FilteredTermsEnum.next(FilteredTermsEnum.java:232)\n at org.apache.lucene.search.TermCollectingRewrite.collectTerms(TermCollectingRewrite.java:67)\n at org.apache.lucene.search.ScoringRewrite.rewrite(ScoringRewrite.java:108)\n at org.apache.lucene.search.highlight.WeightedSpanTermExtractor.extract(WeightedSpanTermExtractor.java:220)\n at org.apache.lucene.search.highlight.WeightedSpanTermExtractor.extract(WeightedSpanTermExtractor.java:227)\n at org.apache.lucene.search.highlight.WeightedSpanTermExtractor.extract(WeightedSpanTermExtractor.java:113)\n at org.apache.lucene.search.highlight.WeightedSpanTermExtractor.extract(WeightedSpanTermExtractor.java:113)\n at org.apache.lucene.search.highlight.WeightedSpanTermExtractor.getWeightedSpanTerms(WeightedSpanTermExtractor.java:505)\n at org.apache.lucene.search.highlight.QueryScorer.initExtractor(QueryScorer.java:218)\n at org.apache.lucene.search.highlight.QueryScorer.init(QueryScorer.java:186)\n at org.apache.lucene.search.highlight.Highlighter.getBestTextFragments(Highlighter.java:195)\n at org.elasticsearch.search.highlight.PlainHighlighter.highlight(PlainHighlighter.java:108)\n ... 
12 more`\n\nThe field cyberdyne_metadata.ner.mitie.model.DISEASE.tag should not be a geopoint according to the dynamic template.\n", "created_at": "2016-04-07T07:20:27Z" }, { "body": "@rmuir oh, good catch\n@clintongormley The stack trace indeed suggests that the issue is with highlighting on the geo field. Regardless of this bug, I wonder that we should fail early when highlighting on anything but text fields and/or exclude non-text fields from wildcard matching.\n", "created_at": "2016-04-07T07:33:06Z" }, { "body": "> I wonder that we should fail early when highlighting on anything but text fields and/or exclude non-text fields from wildcard matching.\n\n+1 to fail early if the user explicitly defined a non text field to highlight on and exclude non text fields when using wildcards\n", "created_at": "2016-04-07T08:40:43Z" }, { "body": "I was running into this bug during a live demo... Yes I know, I've should have tested all demo scenario's after updating ES :grimacing: . Anyway, +1 for fixing this!\n", "created_at": "2016-04-17T08:09:07Z" }, { "body": "-I´m having the same error. It's happends with doc having location and trying to use \n\"highlight\": {... \"require_field_match\": false ...}\n\nthanks!\n", "created_at": "2016-04-18T21:45:45Z" }, { "body": "I'm unclear as to what exactly is going on here, but I'm running into the same issue. I'm attempting to do a geo bounding box in Kibana while viewing the results in the Discover tab. Disabling highlighting in Kibana fixes the issue, but I would actually like to keep highlighting enabled, since it's super useful otherwise.\n\nIt sounds from what others are saying that this should fail when querying on _any_ non-string field, but I am not getting the same failure on numeric fields. Is it just an issue with geoip fields? I suppose another nice thing would be to explicitly allow for configuration of which fields should be highlighted in Kibana.\n", "created_at": "2016-05-03T01:52:24Z" }, { "body": "Please fix this issue.\n", "created_at": "2016-05-03T10:40:19Z" }, { "body": "I wrote two tests so that everyone can reproduce what happens easily: https://github.com/brwe/elasticsearch/commit/ffa242941e4ede34df67301f7b9d46ea8719cc22\n\nIn brief:\nThe plain highlighter tries to highlight whatever the BBQuery provides as terms in the text \"60,120\" if that is how the `geo_point` was indexed (if the point was indexed with `{\"lat\": 60, \"lon\": 120}` nothing will happen because we cannot even extract anything from the source). The terms in the text are provided to Lucene as a token steam with a keyword analyzer.\nIn Lucene, this token stream is converted this via a longish call stack into a terms enum. But this terms enum is pulled from the query that contains the terms that are to be highlighted. In this case we call `GeoPointMultiTermQuery.getTermsEnum(terms)` which wraps the term in a `GeoPointTermsEnum`. This enum tries to convert a prefix coded geo term back to something else but because it is really just the string \"60,120\" it throws the exception we see. \n\nI am unsure yet how a correct fix would look like but do wonder why we try highlingting on numeric and geo fields at all? If anyone has an opinion let me know.\n", "created_at": "2016-05-04T17:50:50Z" }, { "body": "I missed @jpountz comment:\n\n> Regardless of this bug, I wonder that we should fail early when highlighting on anything but text fields and/or exclude non-text fields from wildcard matching.\n\nI agree. 
Will make a pr for that.\n", "created_at": "2016-05-04T17:57:01Z" }, { "body": "@brwe you did something similar before: https://github.com/elastic/elasticsearch/pull/11364 - i would have thought that that PR should have fixed this issue?\n", "created_at": "2016-05-05T08:17:58Z" }, { "body": "@clintongormley Yes you are right. #11364 only addresses problems one gets when the way text is indexed is not compatible with the highlighter used. I do not remember why I did not exclude numeric fields then. \n", "created_at": "2016-05-05T09:15:10Z" }, { "body": "Great work. Tnx \n\n:sunglasses: \n", "created_at": "2016-05-07T13:15:25Z" }, { "body": "This is not fixed in 2.3.3 yet, correct?\n", "created_at": "2016-05-19T07:09:10Z" }, { "body": "@rodgermoore It should be fixed in 2.3.3, can you still reproduce the problem?\n", "created_at": "2016-05-19T07:13:30Z" }, { "body": "Ubuntu 14.04-04\nElasticsearch 2.3.3\nKibana 4.5.1\nJVM 1.8.0_66\n\nI am still able to reproduce this error in Kibana 4.5.1. I have a dashboard with a search panel with highlighting enabled. On the same Dashboard I have a tile map and after selecting an area in this map using the select function (draw a rectangle) I got the \"Invalid shift value (xx) in prefixCoded bytes (is encoded value really a geo point?)\" error.\n\nWhen I alter the json settings file of the search panel and remove highlighting the error does not pop-up.\n", "created_at": "2016-05-19T11:44:32Z" }, { "body": "@rodgermoore I cannot reproduce this but I might do something different from you. Here is my dashboard:\n\n![image](https://cloud.githubusercontent.com/assets/4320215/15393472/bd2b4cf2-1dcd-11e6-8ac1-cf6ba5e995b7.png)\n\nIs that what you did?\nCan you attach the whole stacktrace from the elasticsearch logs again? If you did not change the logging config the full search request should be in there. Also, if you can please add an example document.\n", "created_at": "2016-05-19T13:07:51Z" }, { "body": "I see you used \"text:blah\". I did not enter a search at all (so used the default wildcard) and then did the aggregation on the tile map. This resulted in the error. \n", "created_at": "2016-05-19T13:12:50Z" }, { "body": "I can remove the query and still get a result. Can you please attach the relevant part of the elasticsearch log? 
\n", "created_at": "2016-05-19T13:16:46Z" }, { "body": "Here you go:\n\n```\n[2016-05-19 15:23:08,270][DEBUG][action.search ] [Black King] All shards failed for phase: [query_fetch]\nRemoteTransportException[[Black King][192.168.48.18:9300][indices:data/read/search[phase/query+fetch]]]; nested: FetchPhaseExecutionException[Fetch Failed [Failed to highlight field [tags.nl]]]; nested: NumberFormatException[Invalid shift value (115) in prefixCoded bytes (is encoded value really a geo point?)];\nCaused by: FetchPhaseExecutionException[Fetch Failed [Failed to highlight field [tags.nl]]]; nested: NumberFormatException[Invalid shift value (115) in prefixCoded bytes (is encoded value really a geo point?)];\n at org.elasticsearch.search.highlight.PlainHighlighter.highlight(PlainHighlighter.java:123)\n at org.elasticsearch.search.highlight.HighlightPhase.hitExecute(HighlightPhase.java:140)\n at org.elasticsearch.search.fetch.FetchPhase.execute(FetchPhase.java:188)\n at org.elasticsearch.search.SearchService.executeFetchPhase(SearchService.java:480)\n at org.elasticsearch.search.action.SearchServiceTransportAction$SearchQueryFetchTransportHandler.messageReceived(SearchServiceTransportAction.java:392)\n at org.elasticsearch.search.action.SearchServiceTransportAction$SearchQueryFetchTransportHandler.messageReceived(SearchServiceTransportAction.java:389)\n at org.elasticsearch.transport.TransportRequestHandler.messageReceived(TransportRequestHandler.java:33)\n at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:75)\n at org.elasticsearch.transport.TransportService$4.doRun(TransportService.java:376)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: java.lang.NumberFormatException: Invalid shift value (115) in prefixCoded bytes (is encoded value really a geo point?)\n at org.apache.lucene.spatial.util.GeoEncodingUtils.getPrefixCodedShift(GeoEncodingUtils.java:134)\n at org.apache.lucene.spatial.geopoint.search.GeoPointPrefixTermsEnum.accept(GeoPointPrefixTermsEnum.java:219)\n at org.apache.lucene.index.FilteredTermsEnum.next(FilteredTermsEnum.java:232)\n at org.apache.lucene.search.TermCollectingRewrite.collectTerms(TermCollectingRewrite.java:67)\n at org.apache.lucene.search.ScoringRewrite.rewrite(ScoringRewrite.java:108)\n at org.apache.lucene.search.highlight.WeightedSpanTermExtractor.extract(WeightedSpanTermExtractor.java:220)\n at org.apache.lucene.search.highlight.WeightedSpanTermExtractor.extract(WeightedSpanTermExtractor.java:227)\n at org.apache.lucene.search.highlight.WeightedSpanTermExtractor.extract(WeightedSpanTermExtractor.java:113)\n at org.apache.lucene.search.highlight.WeightedSpanTermExtractor.extract(WeightedSpanTermExtractor.java:113)\n at org.apache.lucene.search.highlight.WeightedSpanTermExtractor.getWeightedSpanTerms(WeightedSpanTermExtractor.java:505)\n at org.apache.lucene.search.highlight.QueryScorer.initExtractor(QueryScorer.java:218)\n at org.apache.lucene.search.highlight.QueryScorer.init(QueryScorer.java:186)\n at org.apache.lucene.search.highlight.Highlighter.getBestTextFragments(Highlighter.java:195)\n at org.elasticsearch.search.highlight.PlainHighlighter.highlight(PlainHighlighter.java:108)\n ... 
12 more\n```\n\nWe are using dynamic mapping and we dynamically analyse all string fields using the Dutch language analyzer. All string fields get a non analyzed field: \"field.raw\" and a Dutch analyzed field \"field.nl\". \n", "created_at": "2016-05-19T13:37:15Z" }, { "body": "Ah...I was hoping to get the actual request but it is not in the stacktrace after all. Can you also add the individual requests from the panels in your dashboard (in the spy tab) and a screenshot so I can see what the geo bounding box filter filters on? I could then try to reconstruct the request.\n\nAlso, are you sure you upgraded all nodes in the cluster? Check with `curl -XGET \"http://hostname:port/_nodes\"`. Would be great if you could add the output of that here too just to be sure. \n", "created_at": "2016-05-19T13:46:59Z" }, { "body": "I have got the exact same issue. I am running 2.3.3. All my nodes (1) are upgraded.\n", "created_at": "2016-05-19T14:17:36Z" }, { "body": "<img width=\"1676\" alt=\"screen shot 2016-05-19 at 16 29 15\" src=\"https://cloud.githubusercontent.com/assets/78766/15397413/7858191c-1de0-11e6-802b-773f4a7ecf79.png\">\n", "created_at": "2016-05-19T14:42:02Z" }, { "body": "Here you go.\n\nTile Map Query:\n\n```\n{\n \"query\": {\n \"filtered\": {\n \"query\": {\n \"query_string\": {\n \"analyze_wildcard\": true,\n \"query\": \"*\"\n }\n },\n \"filter\": {\n \"bool\": {\n \"must\": [\n {\n \"geo_bounding_box\": {\n \"SomeGeoField\": {\n \"top_left\": {\n \"lat\": REMOVED,\n \"lon\": REMOVED\n },\n \"bottom_right\": {\n \"lat\": REMOVED,\n \"lon\": REMOVED\n }\n }\n },\n \"$state\": {\n \"store\": \"appState\"\n }\n },\n {\n \"query\": {\n \"query_string\": {\n \"query\": \"*\",\n \"analyze_wildcard\": true\n }\n }\n },\n {\n \"range\": {\n \"@timestamp\": {\n \"gte\": 1458485686484,\n \"lte\": 1463666086484,\n \"format\": \"epoch_millis\"\n }\n }\n }\n ],\n \"must_not\": []\n }\n }\n }\n },\n \"size\": 0,\n \"aggs\": {\n \"2\": {\n \"geohash_grid\": {\n \"field\": \"SomeGeoField\",\n \"precision\": 5\n }\n }\n }\n}\n```\n\nI'm using a single node cluster, here's the info:\n\n```\n{\n \"cluster_name\": \"elasticsearch\",\n \"nodes\": {\n \"RtBthRfeSOSud1XfRRAkSA\": {\n \"name\": \"Black King\",\n \"transport_address\": \"192.168.48.18:9300\",\n \"host\": \"192.168.48.18\",\n \"ip\": \"192.168.48.18\",\n \"version\": \"2.3.3\",\n \"build\": \"218bdf1\",\n \"http_address\": \"192.168.48.18:9200\",\n \"settings\": {\n \"pidfile\": \"/var/run/elasticsearch/elasticsearch.pid\",\n \"cluster\": {\n \"name\": \"elasticsearch\"\n },\n \"path\": {\n \"conf\": \"/etc/elasticsearch\",\n \"data\": \"/var/lib/elasticsearch\",\n \"logs\": \"/var/log/elasticsearch\",\n \"home\": \"/usr/share/elasticsearch\",\n \"repo\": [\n \"/home/somename/es_backups\"\n ]\n },\n \"name\": \"Black King\",\n \"client\": {\n \"type\": \"node\"\n },\n \"foreground\": \"false\",\n \"config\": {\n \"ignore_system_properties\": \"true\"\n },\n \"network\": {\n \"host\": \"0.0.0.0\"\n }\n },\n \"os\": {\n \"refresh_interval_in_millis\": 1000,\n \"name\": \"Linux\",\n \"arch\": \"amd64\",\n \"version\": \"3.19.0-59-generic\",\n \"available_processors\": 8,\n \"allocated_processors\": 8\n },\n \"process\": {\n \"refresh_interval_in_millis\": 1000,\n \"id\": 1685,\n \"mlockall\": false\n },\n \"jvm\": {\n \"pid\": 1685,\n \"version\": \"1.8.0_66\",\n \"vm_name\": \"Java HotSpot(TM) 64-Bit Server VM\",\n \"vm_version\": \"25.66-b17\",\n \"vm_vendor\": \"Oracle Corporation\",\n \"start_time_in_millis\": 1463663018422,\n \"mem\": 
{\n \"heap_init_in_bytes\": 6442450944,\n \"heap_max_in_bytes\": 6372720640,\n \"non_heap_init_in_bytes\": 2555904,\n \"non_heap_max_in_bytes\": 0,\n \"direct_max_in_bytes\": 6372720640\n },\n \"gc_collectors\": [\n \"ParNew\",\n \"ConcurrentMarkSweep\"\n ],\n \"memory_pools\": [\n \"Code Cache\",\n \"Metaspace\",\n \"Compressed Class Space\",\n \"Par Eden Space\",\n \"Par Survivor Space\",\n \"CMS Old Gen\"\n ],\n \"using_compressed_ordinary_object_pointers\": \"true\"\n },\n \"thread_pool\": {\n \"force_merge\": {\n \"type\": \"fixed\",\n \"min\": 1,\n \"max\": 1,\n \"queue_size\": -1\n },\n \"percolate\": {\n \"type\": \"fixed\",\n \"min\": 8,\n \"max\": 8,\n \"queue_size\": 1000\n },\n \"fetch_shard_started\": {\n \"type\": \"scaling\",\n \"min\": 1,\n \"max\": 16,\n \"keep_alive\": \"5m\",\n \"queue_size\": -1\n },\n \"listener\": {\n \"type\": \"fixed\",\n \"min\": 4,\n \"max\": 4,\n \"queue_size\": -1\n },\n \"index\": {\n \"type\": \"fixed\",\n \"min\": 8,\n \"max\": 8,\n \"queue_size\": 200\n },\n \"refresh\": {\n \"type\": \"scaling\",\n \"min\": 1,\n \"max\": 4,\n \"keep_alive\": \"5m\",\n \"queue_size\": -1\n },\n \"suggest\": {\n \"type\": \"fixed\",\n \"min\": 8,\n \"max\": 8,\n \"queue_size\": 1000\n },\n \"generic\": {\n \"type\": \"cached\",\n \"keep_alive\": \"30s\",\n \"queue_size\": -1\n },\n \"warmer\": {\n \"type\": \"scaling\",\n \"min\": 1,\n \"max\": 4,\n \"keep_alive\": \"5m\",\n \"queue_size\": -1\n },\n \"search\": {\n \"type\": \"fixed\",\n \"min\": 13,\n \"max\": 13,\n \"queue_size\": 1000\n },\n \"flush\": {\n \"type\": \"scaling\",\n \"min\": 1,\n \"max\": 4,\n \"keep_alive\": \"5m\",\n \"queue_size\": -1\n },\n \"fetch_shard_store\": {\n \"type\": \"scaling\",\n \"min\": 1,\n \"max\": 16,\n \"keep_alive\": \"5m\",\n \"queue_size\": -1\n },\n \"management\": {\n \"type\": \"scaling\",\n \"min\": 1,\n \"max\": 5,\n \"keep_alive\": \"5m\",\n \"queue_size\": -1\n },\n \"get\": {\n \"type\": \"fixed\",\n \"min\": 8,\n \"max\": 8,\n \"queue_size\": 1000\n },\n \"bulk\": {\n \"type\": \"fixed\",\n \"min\": 8,\n \"max\": 8,\n \"queue_size\": 50\n },\n \"snapshot\": {\n \"type\": \"scaling\",\n \"min\": 1,\n \"max\": 4,\n \"keep_alive\": \"5m\",\n \"queue_size\": -1\n }\n },\n \"transport\": {\n \"bound_address\": [\n \"[::]:9300\"\n ],\n \"publish_address\": \"192.168.48.18:9300\",\n \"profiles\": {}\n },\n \"http\": {\n \"bound_address\": [\n \"[::]:9200\"\n ],\n \"publish_address\": \"192.168.48.18:9200\",\n \"max_content_length_in_bytes\": 104857600\n },\n \"plugins\": [],\n \"modules\": [\n {\n \"name\": \"lang-expression\",\n \"version\": \"2.3.3\",\n \"description\": \"Lucene expressions integration for Elasticsearch\",\n \"jvm\": true,\n \"classname\": \"org.elasticsearch.script.expression.ExpressionPlugin\",\n \"isolated\": true,\n \"site\": false\n },\n {\n \"name\": \"lang-groovy\",\n \"version\": \"2.3.3\",\n \"description\": \"Groovy scripting integration for Elasticsearch\",\n \"jvm\": true,\n \"classname\": \"org.elasticsearch.script.groovy.GroovyPlugin\",\n \"isolated\": true,\n \"site\": false\n },\n {\n \"name\": \"reindex\",\n \"version\": \"2.3.3\",\n \"description\": \"_reindex and _update_by_query APIs\",\n \"jvm\": true,\n \"classname\": \"org.elasticsearch.index.reindex.ReindexPlugin\",\n \"isolated\": true,\n \"site\": false\n }\n ]\n }\n }\n}\n```\n\nScreenshot, I had to clear out the data:\n\n![error_es](https://cloud.githubusercontent.com/assets/12231719/15397399/668a374c-1de0-11e6-903d-f929a2d9f0b2.PNG)\n", "created_at": 
"2016-05-19T14:42:12Z" }, { "body": "@rodgermoore does the query you provided work correctly? You said that it started working once you deleted the highlighting and this query doesn't contain highlighting. Could you provide the query that doesn't work?\n", "created_at": "2016-05-19T14:45:25Z" }, { "body": "It does has highlighting enabled. This is the json for the search panel: \n\n```\n{\n \"index\": \"someindex\",\n \"query\": {\n \"query_string\": {\n \"query\": \"*\",\n \"analyze_wildcard\": true\n }\n },\n \"filter\": [],\n \"highlight\": {\n \"pre_tags\": [\n \"@kibana-highlighted-field@\"\n ],\n \"post_tags\": [\n \"@/kibana-highlighted-field@\"\n ],\n \"fields\": {\n \"*\": {}\n },\n \"require_field_match\": false,\n \"fragment_size\": 2147483647\n }\n}\n```\n\nI can't show the actual data so I selected to show only the timestamp field in the search panel in the screenshot...\n\nWhen I change the json of the search panel to:\n\n```\n{\n \"index\": \"someindex\",\n \"filter\": [],\n \"query\": {\n \"query_string\": {\n \"query\": \"*\",\n \"analyze_wildcard\": true\n }\n }\n}\n```\n\nThe error disappears.\n", "created_at": "2016-05-19T14:51:27Z" }, { "body": "If my understanding of the patch is correct, it shouldn't matter whether Kibana is including the highlighting field. Elasticsearch should only be trying to highlight string fields, even if a wildcard is being used.\n", "created_at": "2016-05-19T14:54:44Z" }, { "body": "Ok, I managed to reproduce it on 2.3.3. It happens with `\"geohash\": true` in the mapping. \n\nSteps are:\n\n```\nDELETE test\nPUT test \n{\n \"mappings\": {\n \"doc\": {\n \"properties\": {\n \"point\": {\n \"type\": \"geo_point\",\n \"geohash\": true\n }\n }\n }\n }\n}\n\nPUT test/doc/1\n{\n \"point\": \"60.12,100.34\"\n}\n\nPOST test/_search\n{\n \"query\": {\n \"geo_bounding_box\": {\n \"point\": {\n \"top_left\": {\n \"lat\": 61.10078883158897,\n \"lon\": -170.15625\n },\n \"bottom_right\": {\n \"lat\": -64.92354174306496,\n \"lon\": 118.47656249999999\n }\n }\n }\n },\n \"highlight\": {\n \"fields\": {\n \"*\": {}\n }\n }\n}\n```\n\nSorry, I did not think of that. I work on another fix.\n", "created_at": "2016-05-19T16:23:44Z" } ], "number": 17537, "title": "Invalid shift value (xx) in prefixCoded bytes (is encoded value really a geo point?)" }
{ "body": "Geo queries and plain highlighter do not seem to work well\ntogether (https://issues.apache.org/jira/browse/LUCENE-7293)\nso we need to skip all geo related queries when we highlight.\n\nhopefully closes #17537\n\nOnly plain highlighter seems to be affected.\nI am worried that we might need to take care of GeoPointInBBoxQueryImpl too although I do not see how a rewritten query can be used for highlighting, see TODO below. \n\n@nik9000 since this was your idea, maybe you want to take a look?\n", "number": 18495, "review_comments": [ { "body": "Do we also expose `GeoPointDistanceQuery` or `GeoPointInPolygonQuery`?\n", "created_at": "2016-05-20T18:24:44Z" }, { "body": "Sorry, nevermind: both of those queries also subclass `GeoPoinInBBoxQuery`, so this check is sufficient!\n", "created_at": "2016-05-20T18:33:40Z" } ], "title": "Skip all geo point queries in plain highlighter" }
{ "commits": [ { "message": "skip all geo point queries in plain highlighter\n\nGeo queries and plain highlighter do not seem to work well\ntogether (https://issues.apache.org/jira/browse/LUCENE-7293)\nso we need to skip all geo related queries when we highlight.\n\ncloses #17537" } ], "files": [ { "diff": "@@ -24,6 +24,7 @@\n import org.apache.lucene.search.highlight.QueryScorer;\n import org.apache.lucene.search.highlight.WeightedSpanTerm;\n import org.apache.lucene.search.highlight.WeightedSpanTermExtractor;\n+import org.apache.lucene.spatial.geopoint.search.GeoPointInBBoxQuery;\n import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;\n import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;\n \n@@ -87,6 +88,12 @@ protected void extractUnknownQuery(Query query,\n }\n }\n \n+ protected void extract(Query query, float boost, Map<String, WeightedSpanTerm> terms) throws IOException {\n+ // skip all geo queries, see https://issues.apache.org/jira/browse/LUCENE-7293 and\n+ // https://github.com/elastic/elasticsearch/issues/17537\n+ if (query instanceof GeoPointInBBoxQuery == false) {\n+ super.extract(query, boost, terms);\n+ }\n+ }\n }\n-\n }", "filename": "core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java", "status": "modified" }, { "diff": "@@ -2553,16 +2553,22 @@ private void phraseBoostTestCase(String highlighterType) {\n assertHighlight(response, 0, \"field1\", 0, 1, highlightedMatcher);\n }\n \n- public void testGeoFieldHighlighting() throws IOException {\n+ public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOException {\n // check that we do not get an exception for geo_point fields in case someone tries to highlight\n- // it accidential with a wildcard\n+ // it accidentially with a wildcard\n // see https://github.com/elastic/elasticsearch/issues/17537\n XContentBuilder mappings = jsonBuilder();\n mappings.startObject();\n mappings.startObject(\"type\")\n .startObject(\"properties\")\n .startObject(\"geo_point\")\n .field(\"type\", \"geo_point\")\n+ .field(\"geohash\", true)\n+ .endObject()\n+ .startObject(\"text\")\n+ .field(\"type\", \"text\")\n+ .field(\"term_vector\", \"with_positions_offsets_payloads\")\n+ .field(\"index_options\", \"offsets\")\n .endObject()\n .endObject()\n .endObject();\n@@ -2572,14 +2578,19 @@ public void testGeoFieldHighlighting() throws IOException {\n ensureYellow();\n \n client().prepareIndex(\"test\", \"type\", \"1\")\n- .setSource(jsonBuilder().startObject().field(\"geo_point\", \"60.12,100.34\").endObject())\n+ .setSource(jsonBuilder().startObject().field(\"text\", \"Arbitrary text field which will should not cause a failure\").endObject())\n .get();\n refresh();\n+ String highlighterType = randomFrom(\"plain\", \"fvh\", \"postings\");\n+ QueryBuilder query = QueryBuilders.boolQuery().should(QueryBuilders.geoBoundingBoxQuery(\"geo_point\")\n+ .setCorners(61.10078883158897, -170.15625, -64.92354174306496, 118.47656249999999))\n+ .should(QueryBuilders.termQuery(\"text\", \"failure\"));\n SearchResponse search = client().prepareSearch().setSource(\n- new SearchSourceBuilder().query(QueryBuilders.geoBoundingBoxQuery(\"geo_point\").setCorners(61.10078883158897, -170.15625,\n- -64.92354174306496, 118.47656249999999)).highlighter(new HighlightBuilder().field(\"*\"))).get();\n+ new SearchSourceBuilder().query(query)\n+ .highlighter(new HighlightBuilder().field(\"*\").highlighterType(highlighterType))).get();\n assertNoFailures(search);\n 
assertThat(search.getHits().totalHits(), equalTo(1L));\n+ assertThat(search.getHits().getAt(0).highlightFields().get(\"text\").fragments().length, equalTo(1));\n }\n \n public void testKeywordFieldHighlighting() throws IOException {", "filename": "core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java", "status": "modified" }, { "diff": "@@ -19,12 +19,28 @@\n \n package org.elasticsearch.search.highlight;\n \n+import org.apache.lucene.analysis.Analyzer;\n import org.apache.lucene.analysis.MockAnalyzer;\n+import org.apache.lucene.analysis.standard.StandardAnalyzer;\n import org.apache.lucene.index.Term;\n+import org.apache.lucene.search.BooleanClause;\n+import org.apache.lucene.search.BooleanQuery;\n import org.apache.lucene.search.PhraseQuery;\n import org.apache.lucene.search.Query;\n+import org.apache.lucene.search.TermQuery;\n+import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;\n import org.apache.lucene.search.highlight.QueryScorer;\n+import org.apache.lucene.spatial.geopoint.search.GeoPointDistanceQuery;\n+import org.apache.lucene.spatial.geopoint.search.GeoPointInBBoxQuery;\n+import org.apache.lucene.spatial.geopoint.search.GeoPointInPolygonQuery;\n import org.apache.lucene.util.LuceneTestCase;\n+import org.elasticsearch.index.analysis.FieldNameAnalyzer;\n+\n+import java.io.IOException;\n+import java.util.HashMap;\n+import java.util.Map;\n+\n+import static org.hamcrest.Matchers.equalTo;\n \n public class PlainHighlighterTests extends LuceneTestCase {\n \n@@ -39,4 +55,36 @@ public void testHighlightPhrase() throws Exception {\n assertArrayEquals(new String[] {\"bar <B>foo</B> <B>bar</B> foo\"}, frags);\n }\n \n+ public void checkGeoQueryHighlighting(Query geoQuery) throws IOException, InvalidTokenOffsetsException {\n+ Map analysers = new HashMap<String, Analyzer>();\n+ analysers.put(\"text\", new StandardAnalyzer());\n+ FieldNameAnalyzer fieldNameAnalyzer = new FieldNameAnalyzer(analysers);\n+ Query termQuery = new TermQuery(new Term(\"text\", \"failure\"));\n+ Query boolQuery = new BooleanQuery.Builder().add(new BooleanClause(geoQuery, BooleanClause.Occur.SHOULD))\n+ .add(new BooleanClause(termQuery, BooleanClause.Occur.SHOULD)).build();\n+ org.apache.lucene.search.highlight.Highlighter highlighter =\n+ new org.apache.lucene.search.highlight.Highlighter(new CustomQueryScorer(boolQuery));\n+ String fragment = highlighter.getBestFragment(fieldNameAnalyzer.tokenStream(\"text\", \"Arbitrary text field which should not cause \" +\n+ \"a failure\"), \"Arbitrary text field which should not cause a failure\");\n+ assertThat(fragment, equalTo(\"Arbitrary text field which should not cause a <B>failure</B>\"));\n+ // TODO: This test will fail if we pass in an instance of GeoPointInBBoxQueryImpl too. 
Should we also find a way to work around that\n+ // or can the query not be rewritten before it is passed into the highlighter?\n+ }\n+\n+ public void testGeoPointInBBoxQueryHighlighting() throws IOException, InvalidTokenOffsetsException {\n+ Query geoQuery = new GeoPointDistanceQuery(\"geo_point\", -64.92354174306496, -170.15625, 5576757);\n+ checkGeoQueryHighlighting(geoQuery);\n+ }\n+\n+ public void testGeoPointDistanceQueryHighlighting() throws IOException, InvalidTokenOffsetsException {\n+ Query geoQuery = new GeoPointInBBoxQuery(\"geo_point\", -64.92354174306496, 61.10078883158897, -170.15625, 118.47656249999999);\n+ checkGeoQueryHighlighting(geoQuery);\n+ }\n+\n+ public void testGeoPointInPolygonQueryHighlighting() throws IOException, InvalidTokenOffsetsException {\n+ double[] polyLats = new double[]{0, 60, 0, 0};\n+ double[] polyLons = new double[]{0, 60, 90, 0};\n+ Query geoQuery = new GeoPointInPolygonQuery(\"geo_point\", polyLats, polyLons);\n+ checkGeoQueryHighlighting(geoQuery);\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/search/highlight/PlainHighlighterTests.java", "status": "modified" } ] }
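For reference, a minimal request in the spirit of the integration test added above, assuming an index named `test` with a `geo_point` field called `geo_point` and a text field called `text`: the bool query mixes a geo clause with a term clause, and highlighting is requested with a wildcard field pattern. With the change, the plain highlighter skips the geo clause and only the text match is highlighted, instead of the whole request failing with the prefix-coded shift error.

```
curl -XPOST 'http://localhost:9200/test/_search' -d '{
  "query": {
    "bool": {
      "should": [
        { "geo_bounding_box": { "geo_point": {
            "top_left":     { "lat": 61.10, "lon": -170.15 },
            "bottom_right": { "lat": -64.92, "lon": 118.47 } } } },
        { "term": { "text": "failure" } }
      ]
    }
  },
  "highlight": { "fields": { "*": {} } }
}'
```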
{ "body": "Have 4 nodes, 3 are sharing the node attribute B, and 1 is using the node attribute A.\n\nPer node specification (https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes), the following works: \n\nThe following returns 3 nodes:\n\n```\ncurl -XGET \"http://localhost:9200/_nodes/pod:B?pretty\"\n```\n\nThe following returns 1 node:\n\n```\ncurl -XGET \"http://localhost:9200/_nodes/pod:A?pretty\"\n```\n\nThe following returns 4 nodes:\n\n```\ncurl -XGET \"http://localhost:9200/_nodes/pod:B,pod:A?pretty\"\n```\n\nHowever, the `_only_nodes` specification for search preference does not work (https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html#search-request-preference):\n\n```\ncurl -XGET \"http://localhost:9200/only_node_test/_search?preference=_only_nodes:pod:B,pod:A\"\n\n{\n \"error\": \"IllegalArgumentException[No data node with critera [pod:B,pod:A] found]\",\n \"status\": 500\n}\n```\n\nWhat is the right syntax for the argument to use multiple node attributes as part of _only_nodes search preference?\n", "comments": [ { "body": "Seems like this affects all multiple forms of the specification, for example, the following multiple-node-id spec returns 3 nodes:\n\n```\ncurl -XGET \"http://localhost:9200/_nodes/Kbqa_XmMTIOlMD2iMDivRg,y4cT5bIRSliW0_UUs8OXkA,ebDNoVsMQSWdFa-UA66uTw?pretty\"\n```\n\nBut when used with _only_nodes search preference, it also doesn't accept it:\n\n```\ncurl -XGET \"http://localhost:9200/only_node_test/_search?preference=_only_nodes:Kbqa_XmMTIOlMD2iMDivRg,y4cT5bIRSliW0_UUs8OXkA,ebDNoVsMQSWdFa-UA66uTw\"\n\n{\n \"error\": \"IllegalArgumentException[No data node with critera [Kbqa_XmMTIOlMD2iMDivRg,y4cT5bIRSliW0_UUs8OXkA,ebDNoVsMQSWdFa-UA66uTw] found]\",\n \"status\": 500\n}\n```\n", "created_at": "2015-08-06T17:54:20Z" }, { "body": "Can we also fix this in the 2.x branch?\n", "created_at": "2016-05-10T17:36:32Z" } ], "number": 12700, "title": "Multiple node spec does not work with _only_nodes search preference" }
{ "body": "- Handle multiple attributes/names (coma separated): _only_nodes:a,b,c\n- Shuffle the nodes that match the preferences.\n\nFix #12546\nFix #12700\n", "number": 18483, "review_comments": [ { "body": "There's an extra escaped quote here?\n", "created_at": "2016-05-20T19:28:54Z" }, { "body": "Should this local variable be renamed `nodeAttributes`?\n", "created_at": "2016-05-20T19:30:24Z" }, { "body": "Nit: `No` -> `no`.\n", "created_at": "2016-05-20T19:30:40Z" } ], "title": "Fix _only_nodes preferences" }
{ "commits": [ { "message": "Fixes for _only_nodes preferences:\n * Handle multiple attributes/name (coma separated).\n * Shuffle the nodes that match the preferences.\n\nFix #12546\nFix #12700" } ], "files": [ { "diff": "@@ -22,6 +22,7 @@\n import org.elasticsearch.cluster.node.DiscoveryNode;\n import org.elasticsearch.cluster.node.DiscoveryNodes;\n import org.elasticsearch.common.Randomness;\n+import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.collect.MapBuilder;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n@@ -331,42 +332,45 @@ public ShardIterator replicaFirstActiveInitializingShardsIt() {\n \n public ShardIterator onlyNodeActiveInitializingShardsIt(String nodeId) {\n ArrayList<ShardRouting> ordered = new ArrayList<>(activeShards.size() + allInitializingShards.size());\n- // fill it in a randomized fashion\n- for (int i = 0; i < activeShards.size(); i++) {\n- ShardRouting shardRouting = activeShards.get(i);\n+ int seed = shuffler.nextSeed();\n+ for (ShardRouting shardRouting : shuffler.shuffle(activeShards, seed)) {\n if (nodeId.equals(shardRouting.currentNodeId())) {\n ordered.add(shardRouting);\n }\n }\n- for (int i = 0; i < allInitializingShards.size(); i++) {\n- ShardRouting shardRouting = allInitializingShards.get(i);\n+ for (ShardRouting shardRouting : shuffler.shuffle(allInitializingShards, seed)) {\n if (nodeId.equals(shardRouting.currentNodeId())) {\n ordered.add(shardRouting);\n }\n }\n return new PlainShardIterator(shardId, ordered);\n }\n \n+ public ShardIterator onlyNodeSelectorActiveInitializingShardsIt(String nodeAttributes, DiscoveryNodes discoveryNodes) {\n+ return onlyNodeSelectorActiveInitializingShardsIt(new String[] {nodeAttributes}, discoveryNodes);\n+ }\n+\n /**\n * Returns shards based on nodeAttributes given such as node name , node attribute, node IP\n * Supports node specifications in cluster API\n */\n- public ShardIterator onlyNodeSelectorActiveInitializingShardsIt(String nodeAttribute, DiscoveryNodes discoveryNodes) {\n+ public ShardIterator onlyNodeSelectorActiveInitializingShardsIt(String[] nodeAttributes, DiscoveryNodes discoveryNodes) {\n ArrayList<ShardRouting> ordered = new ArrayList<>(activeShards.size() + allInitializingShards.size());\n- Set<String> selectedNodes = Sets.newHashSet(discoveryNodes.resolveNodesIds(nodeAttribute));\n-\n- for (ShardRouting shardRouting : activeShards) {\n+ Set<String> selectedNodes = Sets.newHashSet(discoveryNodes.resolveNodesIds(nodeAttributes));\n+ int seed = shuffler.nextSeed();\n+ for (ShardRouting shardRouting : shuffler.shuffle(activeShards, seed)) {\n if (selectedNodes.contains(shardRouting.currentNodeId())) {\n ordered.add(shardRouting);\n }\n }\n- for (ShardRouting shardRouting : allInitializingShards) {\n+ for (ShardRouting shardRouting : shuffler.shuffle(allInitializingShards, seed)) {\n if (selectedNodes.contains(shardRouting.currentNodeId())) {\n ordered.add(shardRouting);\n }\n }\n if (ordered.isEmpty()) {\n- throw new IllegalArgumentException(\"No data node with criteria [\" + nodeAttribute + \"] found\");\n+ throw new IllegalArgumentException(\"no data nodes with critera(s) \" +\n+ Strings.arrayToCommaDelimitedString(nodeAttributes) + \"] found for shard:\" + shardId());\n }\n return new PlainShardIterator(shardId, ordered);\n }", "filename": "core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java", "status": "modified" }, { "diff": "@@ -177,8 +177,8 @@ private ShardIterator 
preferenceActiveShardIterator(IndexShardRoutingTable index\n ensureNodeIdExists(nodes, nodeId);\n return indexShard.onlyNodeActiveInitializingShardsIt(nodeId);\n case ONLY_NODES:\n- String nodeAttribute = preference.substring(Preference.ONLY_NODES.type().length() + 1);\n- return indexShard.onlyNodeSelectorActiveInitializingShardsIt(nodeAttribute, nodes);\n+ String nodeAttributes = preference.substring(Preference.ONLY_NODES.type().length() + 1);\n+ return indexShard.onlyNodeSelectorActiveInitializingShardsIt(nodeAttributes.split(\",\"), nodes);\n default:\n throw new IllegalArgumentException(\"unknown preference [\" + preferenceType + \"]\");\n }", "filename": "core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java", "status": "modified" }, { "diff": "@@ -320,6 +320,24 @@ public void testNodeSelectorRouting(){\n assertThat(shardsIterator.size(), equalTo(1));\n assertThat(shardsIterator.nextOrNull().currentNodeId(),equalTo(\"node2\"));\n \n+ shardsIterator = clusterState.routingTable().index(\"test\").shard(0)\n+ .onlyNodeSelectorActiveInitializingShardsIt(new String[] {\"disk:eph*\",\"disk:ebs\"},clusterState.nodes());\n+ assertThat(shardsIterator.size(), equalTo(2));\n+ assertThat(shardsIterator.nextOrNull().currentNodeId(),equalTo(\"node2\"));\n+ assertThat(shardsIterator.nextOrNull().currentNodeId(),equalTo(\"node1\"));\n+\n+ shardsIterator = clusterState.routingTable().index(\"test\").shard(0)\n+ .onlyNodeSelectorActiveInitializingShardsIt(new String[] {\"disk:*\", \"invalid_name\"},clusterState.nodes());\n+ assertThat(shardsIterator.size(), equalTo(2));\n+ assertThat(shardsIterator.nextOrNull().currentNodeId(),equalTo(\"node2\"));\n+ assertThat(shardsIterator.nextOrNull().currentNodeId(),equalTo(\"node1\"));\n+\n+ shardsIterator = clusterState.routingTable().index(\"test\").shard(0)\n+ .onlyNodeSelectorActiveInitializingShardsIt(new String[] {\"disk:*\", \"disk:*\"},clusterState.nodes());\n+ assertThat(shardsIterator.size(), equalTo(2));\n+ assertThat(shardsIterator.nextOrNull().currentNodeId(),equalTo(\"node2\"));\n+ assertThat(shardsIterator.nextOrNull().currentNodeId(),equalTo(\"node1\"));\n+\n try {\n shardsIterator = clusterState.routingTable().index(\"test\").shard(0).onlyNodeSelectorActiveInitializingShardsIt(\"welma\", clusterState.nodes());\n fail(\"should have raised illegalArgumentException\");", "filename": "core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java", "status": "modified" }, { "diff": "@@ -19,22 +19,30 @@\n \n package org.elasticsearch.search.preference;\n \n+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;\n+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;\n+import org.elasticsearch.action.search.SearchRequestBuilder;\n import org.elasticsearch.action.search.SearchResponse;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.cluster.health.ClusterHealthStatus;\n+import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.rest.RestStatus;\n import org.elasticsearch.test.ESIntegTestCase;\n \n import java.io.IOException;\n+import java.util.ArrayList;\n+import java.util.HashSet;\n+import java.util.Set;\n \n import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;\n import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;\n import static 
org.hamcrest.Matchers.equalTo;\n-import static org.hamcrest.Matchers.greaterThanOrEqualTo;\n-import static org.hamcrest.Matchers.is;\n import static org.hamcrest.Matchers.not;\n+import static org.hamcrest.Matchers.is;\n+import static org.hamcrest.Matchers.greaterThan;\n+import static org.hamcrest.Matchers.greaterThanOrEqualTo;\n \n @ESIntegTestCase.ClusterScope(minNumDataNodes = 2)\n public class SearchPreferenceIT extends ESIntegTestCase {\n@@ -155,4 +163,63 @@ public void testThatSpecifyingNonExistingNodesReturnsUsefulError() throws Except\n assertThat(e.getMessage(), is(\"No data node with id[DOES-NOT-EXIST] found\"));\n }\n }\n+\n+ public void testNodesOnlyRandom() throws Exception {\n+ assertAcked(prepareCreate(\"test\").setSettings(\n+ //this test needs at least a replica to make sure two consecutive searches go to two different copies of the same data\n+ Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, between(1, maximumNumberOfReplicas()))));\n+ ensureGreen();\n+ client().prepareIndex(\"test\", \"type1\").setSource(\"field1\", \"value1\").execute().actionGet();\n+ refresh();\n+\n+ final Client client = internalCluster().smartClient();\n+ SearchRequestBuilder request = client.prepareSearch(\"test\")\n+ .setQuery(matchAllQuery()).setPreference(\"_only_nodes:*,nodes*\"); // multiple wildchar to cover multi-param usecase\n+ assertSearchOnRandomNodes(request);\n+\n+ request = client.prepareSearch(\"test\")\n+ .setQuery(matchAllQuery()).setPreference(\"_only_nodes:*\");\n+ assertSearchOnRandomNodes(request);\n+\n+ ArrayList<String> allNodeIds = new ArrayList<>();\n+ ArrayList<String> allNodeNames = new ArrayList<>();\n+ ArrayList<String> allNodeHosts = new ArrayList<>();\n+ NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().execute().actionGet();\n+ for (NodeStats node : nodeStats.getNodes()) {\n+ allNodeIds.add(node.getNode().getId());\n+ allNodeNames.add(node.getNode().getName());\n+ allNodeHosts.add(node.getHostname());\n+ }\n+\n+ String node_expr = \"_only_nodes:\" + Strings.arrayToCommaDelimitedString(allNodeIds.toArray());\n+ request = client.prepareSearch(\"test\").setQuery(matchAllQuery()).setPreference(node_expr);\n+ assertSearchOnRandomNodes(request);\n+\n+ node_expr = \"_only_nodes:\" + Strings.arrayToCommaDelimitedString(allNodeNames.toArray());\n+ request = client.prepareSearch(\"test\").setQuery(matchAllQuery()).setPreference(node_expr);\n+ assertSearchOnRandomNodes(request);\n+\n+ node_expr = \"_only_nodes:\" + Strings.arrayToCommaDelimitedString(allNodeHosts.toArray());\n+ request = client.prepareSearch(\"test\").setQuery(matchAllQuery()).setPreference(node_expr);\n+ assertSearchOnRandomNodes(request);\n+\n+ node_expr = \"_only_nodes:\" + Strings.arrayToCommaDelimitedString(allNodeHosts.toArray());\n+ request = client.prepareSearch(\"test\").setQuery(matchAllQuery()).setPreference(node_expr);\n+ assertSearchOnRandomNodes(request);\n+\n+ // Mix of valid and invalid nodes\n+ node_expr = \"_only_nodes:*,invalidnode\";\n+ request = client.prepareSearch(\"test\").setQuery(matchAllQuery()).setPreference(node_expr);\n+ assertSearchOnRandomNodes(request);\n+ }\n+\n+ private void assertSearchOnRandomNodes(SearchRequestBuilder request) {\n+ Set<String> hitNodes = new HashSet<>();\n+ for (int i = 0; i < 2; i++) {\n+ SearchResponse searchResponse = request.execute().actionGet();\n+ assertThat(searchResponse.getHits().getHits().length, greaterThan(0));\n+ hitNodes.add(searchResponse.getHits().getAt(0).shard().nodeId());\n+ }\n+ 
assertThat(hitNodes.size(), greaterThan(1));\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/search/preference/SearchPreferenceIT.java", "status": "modified" } ] }
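With the parsing fix above (`nodeAttributes.split(",")` plus a shuffle of the matching shard copies), the multi-valued forms from the original report are expected to resolve instead of failing with `IllegalArgumentException`; the examples below reuse the index name, attribute values, and node ids quoted in that report.

```
# several attribute values, comma separated
curl -XGET 'http://localhost:9200/only_node_test/_search?preference=_only_nodes:pod:B,pod:A'

# several node ids, comma separated
curl -XGET 'http://localhost:9200/only_node_test/_search?preference=_only_nodes:Kbqa_XmMTIOlMD2iMDivRg,y4cT5bIRSliW0_UUs8OXkA'
```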
{ "body": "**Elasticsearch version**: master @ c257e2c51f235853c4453a86e10e463813140fc9\n\n**JVM version**: Oracle Corporation 1.8.0_91 [OpenJDK 64-Bit Server VM 25.91-b14]\n\n**OS version**: Linux 4.4.9-300.fc23.x86_64 (amd64)\n\n**Description of the problem including expected versus actual behavior**:\nES fails to respond with an error for an invalid URL but doesn't disconnect the client.\n\nThe REST layer should fail with an appropriate error and close the connection immediately.\n\n**Steps to reproduce**:\n`curl 'localhost:9200/%a`'\n\n**Provide logs (if relevant)**:\n\n```\n[elasticsearch] [2016-05-19 14:35:52,468][ERROR][rest ] [Nuke - Squadron Supreme Member] failed to send failure response for uri [/%a]\n[elasticsearch] java.lang.IllegalArgumentException: partial escape sequence at end of string: /%a\n[elasticsearch] at org.elasticsearch.rest.support.RestUtils.decode(RestUtils.java:182)\n[elasticsearch] at org.elasticsearch.rest.support.RestUtils.decodeComponent(RestUtils.java:143)\n[elasticsearch] at org.elasticsearch.rest.support.RestUtils.decodeComponent(RestUtils.java:107)\n[elasticsearch] at org.elasticsearch.rest.RestRequest.path(RestRequest.java:62)\n[elasticsearch] at org.elasticsearch.rest.BytesRestResponse.convert(BytesRestResponse.java:126)\n[elasticsearch] at org.elasticsearch.rest.BytesRestResponse.<init>(BytesRestResponse.java:90)\n[elasticsearch] at org.elasticsearch.rest.BytesRestResponse.<init>(BytesRestResponse.java:81)\n[elasticsearch] at org.elasticsearch.rest.RestController.sendErrorResponse(RestController.java:184)\n[elasticsearch] at org.elasticsearch.http.HttpServer.dispatchRequest(HttpServer.java:116)\n[elasticsearch] at org.elasticsearch.http.netty.NettyHttpServerTransport.dispatchRequest(NettyHttpServerTransport.java:490)\n[elasticsearch] at org.elasticsearch.http.netty.HttpRequestHandler.messageReceived(HttpRequestHandler.java:65)\n[elasticsearch] at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70)\n[elasticsearch] at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)\n[elasticsearch] at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)\n[elasticsearch] at org.elasticsearch.http.netty.pipelining.HttpPipeliningHandler.messageReceived(HttpPipeliningHandler.java:85)\n\n```\n", "comments": [ { "body": "Thanks for reporting. I opened #18477.\n", "created_at": "2016-05-19T19:13:59Z" } ], "number": 18476, "title": "Rest URL path parsing fails with IAE when trying to send an error response" }
{ "body": "Today when sending a REST error to a client, we send the decoded\npath. But decoding that path can already be the cause of the error in\nwhich case decoding it again will just throw an exception leading to us\nnever sending an error back to the client. It would be better to send\nthe entire raw path to the client and that is what we do in this commit.\n\nCloses #18476\n", "number": 18477, "review_comments": [], "title": "Do not decode path when sending error" }
{ "commits": [ { "message": "Do not decode path when sending error\n\nToday when sending a REST error to a client, we send the decoded\npath. But decoding that path can already be the cause of the error in\nwhich case decoding it again will just throw an exception leading to us\nnever sending an error back to the client. It would be better to send\nthe entire raw path to the client and that is what we do in this commit." }, { "message": "Add test for when path contains encoding error\n\nThis commit adds a test for the case of returning an error response to\nthe client when the client path request contained an encoding\nerror. Previously, such paths would cause preparing the response to\nthrow an IllegalArgumentException because such preparation would again\nattempt to decode the path. Instead, we now return the raw path to the\nclient and this test ensures that such behavior is maintained." }, { "message": "Fix path encoding error test\n\nThis commit fixes an issue that arose by creating an anonymous class\nderiving from ElasticsearchException. Creating such a class but not\nregistering it violates a test that requires all classes inheriting from\nElasticsearchException to be registered. But we should not register such\nan exception just for a test so this commit refactors the test setup to\nnot require the creation of an anonymous class inheriting from\nElasticsearchException." }, { "message": "Slightly modify format of suppressed REST error\n\nThis commit modifies the format of a suppressed REST error to specify\nlabel the path and the params." }, { "message": "Fix formatting in BytesRestResponse raw path tests\n\nThis commit removes some unnecessary newlines in the raw path tests in\nBytesRestResponse." } ], "files": [ { "diff": "@@ -123,9 +123,9 @@ private static XContentBuilder convert(RestChannel channel, RestStatus status, T\n params = new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, \"false\"), channel.request());\n } else {\n if (status.getStatus() < 500) {\n- SUPPRESSED_ERROR_LOGGER.debug(\"{} Params: {}\", t, channel.request().path(), channel.request().params());\n+ SUPPRESSED_ERROR_LOGGER.debug(\"path: {}, params: {}\", t, channel.request().rawPath(), channel.request().params());\n } else {\n- SUPPRESSED_ERROR_LOGGER.warn(\"{} Params: {}\", t, channel.request().path(), channel.request().params());\n+ SUPPRESSED_ERROR_LOGGER.warn(\"path: {}, params: {}\", t, channel.request().rawPath(), channel.request().params());\n }\n params = channel.request();\n }", "filename": "core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java", "status": "modified" }, { "diff": "@@ -25,6 +25,7 @@\n import org.elasticsearch.action.search.ShardSearchFailure;\n import org.elasticsearch.common.ParsingException;\n import org.elasticsearch.index.Index;\n+import org.elasticsearch.rest.support.RestUtils;\n import org.elasticsearch.search.SearchShardTarget;\n import org.elasticsearch.test.ESTestCase;\n import org.elasticsearch.test.rest.FakeRestRequest;\n@@ -35,8 +36,11 @@\n \n import static org.hamcrest.Matchers.contains;\n import static org.hamcrest.Matchers.containsString;\n+import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.not;\n import static org.hamcrest.Matchers.notNullValue;\n+import static org.mockito.Mockito.mock;\n+import static org.mockito.Mockito.when;\n \n /**\n *\n@@ -147,6 +151,32 @@ public void testConvert() throws IOException {\n assertTrue(stackTrace.contains(\"Caused by: 
ParsingException[foobar]\"));\n }\n \n+ public void testResponseWhenPathContainsEncodingError() throws IOException {\n+ final String path = \"%a\";\n+ final RestRequest request = mock(RestRequest.class);\n+ when(request.rawPath()).thenReturn(path);\n+ final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> RestUtils.decodeComponent(request.rawPath()));\n+ final RestChannel channel = new DetailedExceptionRestChannel(request);\n+ // if we try to decode the path, this will throw an IllegalArgumentException again\n+ final BytesRestResponse response = new BytesRestResponse(channel, e);\n+ assertNotNull(response.content());\n+ final String content = response.content().toUtf8();\n+ assertThat(content, containsString(\"\\\"type\\\":\\\"illegal_argument_exception\\\"\"));\n+ assertThat(content, containsString(\"\\\"reason\\\":\\\"partial escape sequence at end of string: %a\\\"\"));\n+ assertThat(content, containsString(\"\\\"status\\\":\" + 400));\n+ }\n+\n+ public void testResponseWhenInternalServerError() throws IOException {\n+ final RestRequest request = new FakeRestRequest();\n+ final RestChannel channel = new DetailedExceptionRestChannel(request);\n+ final BytesRestResponse response = new BytesRestResponse(channel, new ElasticsearchException(\"simulated\"));\n+ assertNotNull(response.content());\n+ final String content = response.content().toUtf8();\n+ assertThat(content, containsString(\"\\\"type\\\":\\\"exception\\\"\"));\n+ assertThat(content, containsString(\"\\\"reason\\\":\\\"simulated\\\"\"));\n+ assertThat(content, containsString(\"\\\"status\\\":\" + 500));\n+ }\n+\n public static class WithHeadersException extends ElasticsearchException {\n \n WithHeadersException() {", "filename": "core/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java", "status": "modified" } ] }
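To make the root cause of the record above concrete: decoding a raw path such as `/%a` fails because a `%` must be followed by two hex digits, so a trailing partial escape sequence raises an `IllegalArgumentException`; this is exactly why the fix returns the raw, undecoded path to the client. Below is a minimal, hypothetical percent-decoding sketch illustrating that failure mode; it is not the real `RestUtils.decode` implementation.

```java
/**
 * Minimal illustration (assumption: not the actual RestUtils.decode) of percent-decoding
 * that rejects a trailing partial escape sequence such as "/%a". Hex validation of the
 * escape characters and multi-byte UTF-8 handling are deliberately omitted.
 */
final class PercentDecodingSketch {

    static String decode(String s) {
        StringBuilder out = new StringBuilder(s.length());
        for (int i = 0; i < s.length(); i++) {
            char c = s.charAt(i);
            if (c == '%') {
                if (i + 2 >= s.length()) {
                    throw new IllegalArgumentException("partial escape sequence at end of string: " + s);
                }
                out.append((char) Integer.parseInt(s.substring(i + 1, i + 3), 16));
                i += 2; // skip the two hex digits just consumed
            } else {
                out.append(c);
            }
        }
        return out.toString();
    }

    public static void main(String[] args) {
        System.out.println(decode("/a%20b")); // prints "/a b"
        decode("/%a");                        // throws IllegalArgumentException
    }
}
```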
{ "body": "Allocating shards to a node can fail for various reasons. When an allocation fails, we currently ignore the node for that shard during the next allocation round. However, this means that:\n- subsequent rounds consider the node for allocating the shard again.\n- other shards are still allocated to the node (in particular the balancer tries to put shards on that node with the failed shard as its weight becomes smaller).\n This is particularly bad if the node is permanently broken, leading to a never-ending series of failed allocations. Ultimately this affects the stability of the cluster.\n", "comments": [ { "body": "@ywelsch I think we can approach this from multiple directions. \n- we can start bottom up and check that a data-path is writeable before we allocate a shard and skip it if possible (that would help if someone looses a disk and has multiple)\n- we can also has a simple allocation_failed counter on UnassignedInfo to prevent endless allocation of a potentially broken index (metadata / settings / whatever is broken)\n- we might also be able to use a simple counter of failed allocations per node that we can reset once we had a successful one on that node. We can then also have a simple allocation decider that throttles that node or takes it out of the loop entirely once the counter goes beyond a threshold?\n\nI think in all of these cases simplicity wins over complex state... my $0.05\n", "created_at": "2016-05-17T18:38:03Z" }, { "body": "@ywelsch @s1monw is there are news on this? \r\n\r\nSome OSs would cause the mounted disk to be read-only and if so the entire cluster will have issues with RED shards and not moving shards. Perhaps this could help in that end?", "created_at": "2017-08-21T14:59:47Z" }, { "body": "Pinging @elastic/es-distributed", "created_at": "2018-03-15T14:00:04Z" }, { "body": "We have another, non trivial, of instance of this in shard fetching. When it hard fails on a node (rather then succeeding by finding a broking copy) we currently redo the fetching. This is an easy way around networking issue but can be poisonous on disk failures (for example). ", "created_at": "2018-03-21T13:59:18Z" }, { "body": "We would rather remove the broken node from the cluster rather then take an fail allocation(s).", "created_at": "2022-07-28T13:10:48Z" } ], "number": 18417, "title": "Prevent allocating shards to broken nodes" }
{ "body": "Today if a shard fails during initialization phase due to misconfiguration, broken disks,\nmissing analyzers, not installed plugins etc. elasticsaerch keeps on trying to initialize\nor rather allocate that shard. Yet, in the worst case scenario this ends in an endless\nallocation loop. To prevent this loop and all it's sideeffects like spamming log files over\nand over again this commit adds an allocation decider that stops allocating a shard that\nfailed more than N times in a row to allocate. The number or retries can be configured via\n`index.allocation.max_retry` and it's default is set to `5`. Once the setting is updated\nshards with less failures than the number set per index will be allowed to allocate again.\n\nInternally we maintain a counter on the UnassignedInfo that is reset to `0` once the shards\nhas been started.\n\nRelates to #18417\n", "number": 18467, "review_comments": [ { "body": "This `0` is hardcoded, I think this was supposed to be `failedAllocation` instead\n", "created_at": "2016-05-19T15:38:18Z" }, { "body": "attemps -> attempts\n", "created_at": "2016-05-19T15:38:59Z" }, { "body": "same here, attemps -> attempts\n", "created_at": "2016-05-19T15:39:09Z" }, { "body": "Personal preference, but I think `index.allocation.max_retries` would be a better name\n", "created_at": "2016-05-19T15:41:48Z" }, { "body": "Will it be nice to show the last failure here as well? this will help explain how we got here.\n", "created_at": "2016-05-19T15:47:40Z" }, { "body": "just `(assert failedAllocations > 0) == (reason == Reason.ALLOCATION_FAILED)`?\n", "created_at": "2016-05-19T15:49:59Z" }, { "body": "some newlines are ok here :-)\n", "created_at": "2016-05-19T15:50:32Z" }, { "body": "I like @dakrone's suggestion here.\n", "created_at": "2016-05-19T15:54:45Z" }, { "body": "just call this variable `indexMetaData`?\n", "created_at": "2016-05-19T15:57:07Z" }, { "body": "you could parameterize the test on the number of retries. alternatively I would suggest using the setting `SETTING_ALLOCATION_MAX_RETRY.get(settings)` explicitly here instead of hardcoded 4.\n", "created_at": "2016-05-19T16:00:40Z" }, { "body": "s/`that are currently can't be allocated`/`that can't currently be allocated`/\n", "created_at": "2016-05-20T09:55:32Z" }, { "body": "same as above\n", "created_at": "2016-05-20T09:55:49Z" }, { "body": "request.isRetryFailed() as last parameter?\n", "created_at": "2016-05-20T09:57:35Z" }, { "body": "oh good call\n", "created_at": "2016-05-20T10:12:54Z" }, { "body": "add `[float]`before the header\n", "created_at": "2016-05-20T10:21:33Z" }, { "body": "```\nThe cluster will attempt to allocate a shard a maximum of\n`index.allocation.max_retries` times in a row (defaults to `5`), before giving\nup and leaving the shard unallocated. This scenario can be caused by\nstructural problems such as having an analyzer which refers to a stopwords\nfile which doesn't exist on all nodes.\n\nOnce the problem has been corrected, allocation can be manually retried by\ncalling the <<cluster-reroute,`_reroute`>> API with `?retry_failed`, which\nwill attempt a single retry round for these shards.\n```\n", "created_at": "2016-05-20T10:30:51Z" } ], "title": "Limit retries of failed allocations per index" }
{ "commits": [ { "message": "Limit retries of failed allocations per index\n\nToday if a shard fails during initialization phase due to misconfiguration, broken disks,\nmissing analyzers, not installed plugins etc. elasticsaerch keeps on trying to initialize\nor rather allocate that shard. Yet, in the worst case scenario this ends in an endless\nallocation loop. To prevent this loop and all it's sideeffects like spamming log files over\nand over again this commit adds an allocation decider that stops allocating a shard that\nfailed more than N times in a row to allocate. The number or retries can be configured via\n`index.allocation.max_retry` and it's default is set to `5`. Once the setting is updated\nshards with less failures than the number set per index will be allowed to allocate again.\n\nInternally we maintain a counter on the UnassignedInfo that is reset to `0` once the shards\nhas been started.\n\nRelates to #18417" }, { "message": "apply feedback from @dakrone" }, { "message": "fix test" }, { "message": "* Rename max_retry to max_retries\n* append unassigned info to allocaiton decision." }, { "message": "simplify assertion" }, { "message": "apply feedback" }, { "message": "fix line length in test" }, { "message": "add retry_failed=true|false flag to reroute API and add documentation" }, { "message": "fix check condition" }, { "message": "feedback from @ywelsch" }, { "message": "fix docs" }, { "message": "add unittests for ClusterReroute" } ], "files": [ { "diff": "@@ -250,7 +250,7 @@ protected void masterOperation(final ClusterAllocationExplainRequest request, fi\n final ActionListener<ClusterAllocationExplainResponse> listener) {\n final RoutingNodes routingNodes = state.getRoutingNodes();\n final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state,\n- clusterInfoService.getClusterInfo(), System.nanoTime());\n+ clusterInfoService.getClusterInfo(), System.nanoTime(), false);\n \n ShardRouting foundShard = null;\n if (request.useAnyUnassignedShard()) {", "filename": "core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java", "status": "modified" }, { "diff": "@@ -38,9 +38,10 @@\n * Request to submit cluster reroute allocation commands\n */\n public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteRequest> {\n- AllocationCommands commands = new AllocationCommands();\n- boolean dryRun;\n- boolean explain;\n+ private AllocationCommands commands = new AllocationCommands();\n+ private boolean dryRun;\n+ private boolean explain;\n+ private boolean retryFailed;\n \n public ClusterRerouteRequest() {\n }\n@@ -81,13 +82,30 @@ public ClusterRerouteRequest explain(boolean explain) {\n return this;\n }\n \n+ /**\n+ * Sets the retry failed flag (defaults to <tt>false</tt>). If true, the\n+ * request will retry allocating shards that can't currently be allocated due to too many allocation failures.\n+ */\n+ public ClusterRerouteRequest setRetryFailed(boolean retryFailed) {\n+ this.retryFailed = retryFailed;\n+ return this;\n+ }\n+\n /**\n * Returns the current explain flag\n */\n public boolean explain() {\n return this.explain;\n }\n \n+ /**\n+ * Returns the current retry failed flag\n+ */\n+ public boolean isRetryFailed() {\n+ return this.retryFailed;\n+ }\n+\n+\n /**\n * Set the allocation commands to execute.\n */\n@@ -96,6 +114,13 @@ public ClusterRerouteRequest commands(AllocationCommand... 
commands) {\n return this;\n }\n \n+ /**\n+ * Returns the allocation commands to execute\n+ */\n+ public AllocationCommands getCommands() {\n+ return commands;\n+ }\n+\n /**\n * Sets the source for the request.\n */\n@@ -136,6 +161,7 @@ public void readFrom(StreamInput in) throws IOException {\n commands = AllocationCommands.readFrom(in);\n dryRun = in.readBoolean();\n explain = in.readBoolean();\n+ retryFailed = in.readBoolean();\n readTimeout(in);\n }\n \n@@ -145,6 +171,7 @@ public void writeTo(StreamOutput out) throws IOException {\n AllocationCommands.writeTo(commands, out);\n out.writeBoolean(dryRun);\n out.writeBoolean(explain);\n+ out.writeBoolean(retryFailed);\n writeTimeout(out);\n }\n }", "filename": "core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java", "status": "modified" }, { "diff": "@@ -60,6 +60,15 @@ public ClusterRerouteRequestBuilder setExplain(boolean explain) {\n return this;\n }\n \n+ /**\n+ * Sets the retry failed flag (defaults to <tt>false</tt>). If true, the\n+ * request will retry allocating shards that can't currently be allocated due to too many allocation failures.\n+ */\n+ public ClusterRerouteRequestBuilder setRetryFailed(boolean retryFailed) {\n+ request.setRetryFailed(retryFailed);\n+ return this;\n+ }\n+\n /**\n * Sets the commands for the request to execute.\n */", "filename": "core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java", "status": "modified" }, { "diff": "@@ -33,6 +33,7 @@\n import org.elasticsearch.cluster.service.ClusterService;\n import org.elasticsearch.common.Priority;\n import org.elasticsearch.common.inject.Inject;\n+import org.elasticsearch.common.logging.ESLogger;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.TransportService;\n@@ -68,38 +69,55 @@ protected ClusterRerouteResponse newResponse() {\n \n @Override\n protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, final ActionListener<ClusterRerouteResponse> listener) {\n- clusterService.submitStateUpdateTask(\"cluster_reroute (api)\", new AckedClusterStateUpdateTask<ClusterRerouteResponse>(Priority.IMMEDIATE, request, listener) {\n+ clusterService.submitStateUpdateTask(\"cluster_reroute (api)\", new ClusterRerouteResponseAckedClusterStateUpdateTask(logger,\n+ allocationService, request, listener));\n+ }\n \n- private volatile ClusterState clusterStateToSend;\n- private volatile RoutingExplanations explanations;\n+ static class ClusterRerouteResponseAckedClusterStateUpdateTask extends AckedClusterStateUpdateTask<ClusterRerouteResponse> {\n \n- @Override\n- protected ClusterRerouteResponse newResponse(boolean acknowledged) {\n- return new ClusterRerouteResponse(acknowledged, clusterStateToSend, explanations);\n- }\n+ private final ClusterRerouteRequest request;\n+ private final ActionListener<ClusterRerouteResponse> listener;\n+ private final ESLogger logger;\n+ private final AllocationService allocationService;\n+ private volatile ClusterState clusterStateToSend;\n+ private volatile RoutingExplanations explanations;\n \n- @Override\n- public void onAckTimeout() {\n- listener.onResponse(new ClusterRerouteResponse(false, clusterStateToSend, new RoutingExplanations()));\n- }\n+ ClusterRerouteResponseAckedClusterStateUpdateTask(ESLogger logger, AllocationService allocationService, ClusterRerouteRequest request,\n+ ActionListener<ClusterRerouteResponse> listener) {\n+ 
super(Priority.IMMEDIATE, request, listener);\n+ this.request = request;\n+ this.listener = listener;\n+ this.logger = logger;\n+ this.allocationService = allocationService;\n+ }\n \n- @Override\n- public void onFailure(String source, Throwable t) {\n- logger.debug(\"failed to perform [{}]\", t, source);\n- super.onFailure(source, t);\n- }\n+ @Override\n+ protected ClusterRerouteResponse newResponse(boolean acknowledged) {\n+ return new ClusterRerouteResponse(acknowledged, clusterStateToSend, explanations);\n+ }\n+\n+ @Override\n+ public void onAckTimeout() {\n+ listener.onResponse(new ClusterRerouteResponse(false, clusterStateToSend, new RoutingExplanations()));\n+ }\n+\n+ @Override\n+ public void onFailure(String source, Throwable t) {\n+ logger.debug(\"failed to perform [{}]\", t, source);\n+ super.onFailure(source, t);\n+ }\n \n- @Override\n- public ClusterState execute(ClusterState currentState) {\n- RoutingAllocation.Result routingResult = allocationService.reroute(currentState, request.commands, request.explain());\n- ClusterState newState = ClusterState.builder(currentState).routingResult(routingResult).build();\n- clusterStateToSend = newState;\n- explanations = routingResult.explanations();\n- if (request.dryRun) {\n- return currentState;\n- }\n- return newState;\n+ @Override\n+ public ClusterState execute(ClusterState currentState) {\n+ RoutingAllocation.Result routingResult = allocationService.reroute(currentState, request.getCommands(), request.explain(),\n+ request.isRetryFailed());\n+ ClusterState newState = ClusterState.builder(currentState).routingResult(routingResult).build();\n+ clusterStateToSend = newState;\n+ explanations = routingResult.explanations();\n+ if (request.dryRun()) {\n+ return currentState;\n }\n- });\n+ return newState;\n+ }\n }\n-}\n\\ No newline at end of file\n+}", "filename": "core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java", "status": "modified" }, { "diff": "@@ -49,6 +49,7 @@\n import org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider;\n import org.elasticsearch.cluster.routing.allocation.decider.RebalanceOnlyWhenActiveAllocationDecider;\n import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider;\n+import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;\n import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;\n import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;\n import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider;\n@@ -79,6 +80,7 @@ public class ClusterModule extends AbstractModule {\n new Setting<>(\"cluster.routing.allocation.type\", BALANCED_ALLOCATOR, Function.identity(), Property.NodeScope);\n public static final List<Class<? 
extends AllocationDecider>> DEFAULT_ALLOCATION_DECIDERS =\n Collections.unmodifiableList(Arrays.asList(\n+ MaxRetryAllocationDecider.class,\n SameShardAllocationDecider.class,\n FilterAllocationDecider.class,\n ReplicaAfterPrimaryActiveAllocationDecider.class,", "filename": "core/src/main/java/org/elasticsearch/cluster/ClusterModule.java", "status": "modified" }, { "diff": "@@ -48,7 +48,6 @@ public final class UnassignedInfo implements ToXContent, Writeable {\n public static final Setting<TimeValue> INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING =\n Setting.timeSetting(\"index.unassigned.node_left.delayed_timeout\", DEFAULT_DELAYED_NODE_LEFT_TIMEOUT, Property.Dynamic,\n Property.IndexScope);\n-\n /**\n * Reason why the shard is in unassigned state.\n * <p>\n@@ -103,7 +102,11 @@ public enum Reason {\n /**\n * A better replica location is identified and causes the existing replica allocation to be cancelled.\n */\n- REALLOCATED_REPLICA;\n+ REALLOCATED_REPLICA,\n+ /**\n+ * Unassigned as a result of a failed primary while the replica was initializing.\n+ */\n+ PRIMARY_FAILED;\n }\n \n private final Reason reason;\n@@ -112,6 +115,7 @@ public enum Reason {\n private final long lastComputedLeftDelayNanos; // how long to delay shard allocation, not serialized (always positive, 0 means no delay)\n private final String message;\n private final Throwable failure;\n+ private final int failedAllocations;\n \n /**\n * creates an UnassingedInfo object based **current** time\n@@ -120,7 +124,7 @@ public enum Reason {\n * @param message more information about cause.\n **/\n public UnassignedInfo(Reason reason, String message) {\n- this(reason, message, null, System.nanoTime(), System.currentTimeMillis());\n+ this(reason, message, null, reason == Reason.ALLOCATION_FAILED ? 1 : 0, System.nanoTime(), System.currentTimeMillis());\n }\n \n /**\n@@ -130,13 +134,16 @@ public UnassignedInfo(Reason reason, String message) {\n * @param unassignedTimeNanos the time to use as the base for any delayed re-assignment calculation\n * @param unassignedTimeMillis the time of unassignment used to display to in our reporting.\n */\n- public UnassignedInfo(Reason reason, @Nullable String message, @Nullable Throwable failure, long unassignedTimeNanos, long unassignedTimeMillis) {\n+ public UnassignedInfo(Reason reason, @Nullable String message, @Nullable Throwable failure, int failedAllocations, long unassignedTimeNanos, long unassignedTimeMillis) {\n this.reason = reason;\n this.unassignedTimeMillis = unassignedTimeMillis;\n this.unassignedTimeNanos = unassignedTimeNanos;\n this.lastComputedLeftDelayNanos = 0L;\n this.message = message;\n this.failure = failure;\n+ this.failedAllocations = failedAllocations;\n+ assert (failedAllocations > 0) == (reason == Reason.ALLOCATION_FAILED):\n+ \"failedAllocations: \" + failedAllocations + \" for reason \" + reason;\n assert !(message == null && failure != null) : \"provide a message if a failure exception is provided\";\n }\n \n@@ -147,17 +154,19 @@ public UnassignedInfo(UnassignedInfo unassignedInfo, long newComputedLeftDelayNa\n this.lastComputedLeftDelayNanos = newComputedLeftDelayNanos;\n this.message = unassignedInfo.message;\n this.failure = unassignedInfo.failure;\n+ this.failedAllocations = unassignedInfo.failedAllocations;\n }\n \n public UnassignedInfo(StreamInput in) throws IOException {\n this.reason = Reason.values()[(int) in.readByte()];\n this.unassignedTimeMillis = in.readLong();\n // As System.nanoTime() cannot be compared across different JVMs, reset it to now.\n- // This 
means that in master failover situations, elapsed delay time is forgotten.\n+ // This means that in master fail-over situations, elapsed delay time is forgotten.\n this.unassignedTimeNanos = System.nanoTime();\n this.lastComputedLeftDelayNanos = 0L;\n this.message = in.readOptionalString();\n this.failure = in.readThrowable();\n+ this.failedAllocations = in.readVInt();\n }\n \n public void writeTo(StreamOutput out) throws IOException {\n@@ -166,12 +175,18 @@ public void writeTo(StreamOutput out) throws IOException {\n // Do not serialize unassignedTimeNanos as System.nanoTime() cannot be compared across different JVMs\n out.writeOptionalString(message);\n out.writeThrowable(failure);\n+ out.writeVInt(failedAllocations);\n }\n \n public UnassignedInfo readFrom(StreamInput in) throws IOException {\n return new UnassignedInfo(in);\n }\n \n+ /**\n+ * Returns the number of previously failed allocations of this shard.\n+ */\n+ public int getNumFailedAllocations() { return failedAllocations; }\n+\n /**\n * The reason why the shard is unassigned.\n */\n@@ -325,7 +340,11 @@ public String shortSummary() {\n StringBuilder sb = new StringBuilder();\n sb.append(\"[reason=\").append(reason).append(\"]\");\n sb.append(\", at[\").append(DATE_TIME_FORMATTER.printer().print(unassignedTimeMillis)).append(\"]\");\n+ if (failedAllocations > 0) {\n+ sb.append(\", failed_attempts[\").append(failedAllocations).append(\"]\");\n+ }\n String details = getDetails();\n+\n if (details != null) {\n sb.append(\", details[\").append(details).append(\"]\");\n }\n@@ -342,6 +361,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n builder.startObject(\"unassigned_info\");\n builder.field(\"reason\", reason);\n builder.field(\"at\", DATE_TIME_FORMATTER.printer().print(unassignedTimeMillis));\n+ if (failedAllocations > 0) {\n+ builder.field(\"failed_attempts\", failedAllocations);\n+ }\n String details = getDetails();\n if (details != null) {\n builder.field(\"details\", details);", "filename": "core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java", "status": "modified" }, { "diff": "@@ -222,8 +222,10 @@ public RoutingAllocation.Result applyFailedShards(ClusterState clusterState, Lis\n List<FailedRerouteAllocation.FailedShard> orderedFailedShards = new ArrayList<>(failedShards);\n orderedFailedShards.sort(Comparator.comparing(failedShard -> failedShard.shard.primary()));\n for (FailedRerouteAllocation.FailedShard failedShard : orderedFailedShards) {\n+ UnassignedInfo unassignedInfo = failedShard.shard.unassignedInfo();\n+ final int failedAllocations = unassignedInfo != null ? 
unassignedInfo.getNumFailedAllocations() : 0;\n changed |= applyFailedShard(allocation, failedShard.shard, true, new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, failedShard.message, failedShard.failure,\n- System.nanoTime(), System.currentTimeMillis()));\n+ failedAllocations + 1, System.nanoTime(), System.currentTimeMillis()));\n }\n if (!changed) {\n return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());\n@@ -257,16 +259,13 @@ private <T> String firstListElementsToCommaDelimitedString(List<T> elements, Fun\n .collect(Collectors.joining(\", \"));\n }\n \n- public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands) {\n- return reroute(clusterState, commands, false);\n- }\n-\n- public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain) {\n+ public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain, boolean retryFailed) {\n RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);\n // we don't shuffle the unassigned shards here, to try and get as close as possible to\n // a consistent result of the effect the commands have on the routing\n // this allows systems to dry run the commands, see the resulting cluster state, and act on it\n- RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState, clusterInfoService.getClusterInfo(), currentNanoTime());\n+ RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,\n+ clusterInfoService.getClusterInfo(), currentNanoTime(), retryFailed);\n // don't short circuit deciders, we want a full explanation\n allocation.debugDecision(true);\n // we ignore disable allocation, because commands are explicit\n@@ -305,7 +304,8 @@ protected RoutingAllocation.Result reroute(ClusterState clusterState, String rea\n RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);\n // shuffle the unassigned nodes, just so we won't have things like poison failed shards\n routingNodes.unassigned().shuffle();\n- RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState, clusterInfoService.getClusterInfo(), currentNanoTime());\n+ RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,\n+ clusterInfoService.getClusterInfo(), currentNanoTime(), false);\n allocation.debugDecision(debug);\n if (!reroute(allocation)) {\n return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());\n@@ -437,7 +437,7 @@ private boolean deassociateDeadNodes(RoutingAllocation allocation) {\n // now, go over all the shards routing on the node, and fail them\n for (ShardRouting shardRouting : node.copyShards()) {\n UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, \"node_left[\" + node.nodeId() + \"]\", null,\n- allocation.getCurrentNanoTime(), System.currentTimeMillis());\n+ 0, allocation.getCurrentNanoTime(), System.currentTimeMillis());\n applyFailedShard(allocation, shardRouting, false, unassignedInfo);\n }\n // its a dead node, remove it, note, its important to remove it *after* we apply failed shard\n@@ -457,8 +457,8 @@ private boolean failReplicasForUnassignedPrimary(RoutingAllocation allocation, S\n boolean changed = false;\n for (ShardRouting routing : replicas) {\n changed |= applyFailedShard(allocation, routing, false,\n- new 
UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, \"primary failed while replica initializing\",\n- null, allocation.getCurrentNanoTime(), System.currentTimeMillis()));\n+ new UnassignedInfo(UnassignedInfo.Reason.PRIMARY_FAILED, \"primary failed while replica initializing\",\n+ null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis()));\n }\n return changed;\n }", "filename": "core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java", "status": "modified" }, { "diff": "@@ -58,7 +58,7 @@ public String toString() {\n private final List<FailedShard> failedShards;\n \n public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<FailedShard> failedShards, ClusterInfo clusterInfo) {\n- super(deciders, routingNodes, clusterState, clusterInfo, System.nanoTime());\n+ super(deciders, routingNodes, clusterState, clusterInfo, System.nanoTime(), false);\n this.failedShards = failedShards;\n }\n ", "filename": "core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java", "status": "modified" }, { "diff": "@@ -134,6 +134,8 @@ public RoutingExplanations explanations() {\n \n private boolean ignoreDisable = false;\n \n+ private final boolean retryFailed;\n+\n private boolean debugDecision = false;\n \n private boolean hasPendingAsyncFetch = false;\n@@ -148,14 +150,15 @@ public RoutingExplanations explanations() {\n * @param clusterState cluster state before rerouting\n * @param currentNanoTime the nano time to use for all delay allocation calculation (typically {@link System#nanoTime()})\n */\n- public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, ClusterInfo clusterInfo, long currentNanoTime) {\n+ public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, ClusterInfo clusterInfo, long currentNanoTime, boolean retryFailed) {\n this.deciders = deciders;\n this.routingNodes = routingNodes;\n this.metaData = clusterState.metaData();\n this.nodes = clusterState.nodes();\n this.customs = clusterState.customs();\n this.clusterInfo = clusterInfo;\n this.currentNanoTime = currentNanoTime;\n+ this.retryFailed = retryFailed;\n }\n \n /** returns the nano time captured at the beginning of the allocation. used to make sure all time based decisions are aligned */\n@@ -297,4 +300,8 @@ public boolean hasPendingAsyncFetch() {\n public void setHasPendingAsyncFetch() {\n this.hasPendingAsyncFetch = true;\n }\n+\n+ public boolean isRetryFailed() {\n+ return retryFailed;\n+ }\n }", "filename": "core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java", "status": "modified" }, { "diff": "@@ -36,7 +36,7 @@ public class StartedRerouteAllocation extends RoutingAllocation {\n private final List<? extends ShardRouting> startedShards;\n \n public StartedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<? 
extends ShardRouting> startedShards, ClusterInfo clusterInfo) {\n- super(deciders, routingNodes, clusterState, clusterInfo, System.nanoTime());\n+ super(deciders, routingNodes, clusterState, clusterInfo, System.nanoTime(), false);\n this.startedShards = startedShards;\n }\n ", "filename": "core/src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java", "status": "modified" }, { "diff": "@@ -125,7 +125,7 @@ public RerouteExplanation execute(RoutingAllocation allocation, boolean explain)\n // we need to move the unassigned info back to treat it as if it was index creation\n unassignedInfoToUpdate = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED,\n \"force empty allocation from previous reason \" + shardRouting.unassignedInfo().getReason() + \", \" + shardRouting.unassignedInfo().getMessage(),\n- shardRouting.unassignedInfo().getFailure(), System.nanoTime(), System.currentTimeMillis());\n+ shardRouting.unassignedInfo().getFailure(), 0, System.nanoTime(), System.currentTimeMillis());\n }\n \n initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, unassignedInfoToUpdate);", "filename": "core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java", "status": "modified" }, { "diff": "@@ -0,0 +1,83 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.cluster.routing.allocation.decider;\n+\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n+import org.elasticsearch.cluster.routing.RoutingNode;\n+import org.elasticsearch.cluster.routing.ShardRouting;\n+import org.elasticsearch.cluster.routing.UnassignedInfo;\n+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;\n+import org.elasticsearch.common.inject.Inject;\n+import org.elasticsearch.common.settings.Setting;\n+import org.elasticsearch.common.settings.Settings;\n+\n+/**\n+ * An allocation decider that prevents shards from being allocated on any node if the shards allocation has been retried N times without\n+ * success. This means if a shard has been INITIALIZING N times in a row without being moved to STARTED the shard will be ignored until\n+ * the setting for <tt>index.allocation.max_retry</tt> is raised. The default value is <tt>5</tt>.\n+ * Note: This allocation decider also allows allocation of repeatedly failing shards when the <tt>/_cluster/reroute?retry_failed=true</tt>\n+ * API is manually invoked. 
This allows single retries without raising the limits.\n+ *\n+ * @see RoutingAllocation#isRetryFailed()\n+ */\n+public class MaxRetryAllocationDecider extends AllocationDecider {\n+\n+ public static final Setting<Integer> SETTING_ALLOCATION_MAX_RETRY = Setting.intSetting(\"index.allocation.max_retries\", 5, 0,\n+ Setting.Property.Dynamic, Setting.Property.IndexScope);\n+\n+ public static final String NAME = \"max_retry\";\n+\n+ /**\n+ * Initializes a new {@link MaxRetryAllocationDecider}\n+ *\n+ * @param settings {@link Settings} used by this {@link AllocationDecider}\n+ */\n+ @Inject\n+ public MaxRetryAllocationDecider(Settings settings) {\n+ super(settings);\n+ }\n+\n+ @Override\n+ public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) {\n+ UnassignedInfo unassignedInfo = shardRouting.unassignedInfo();\n+ if (unassignedInfo != null && unassignedInfo.getNumFailedAllocations() > 0) {\n+ final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());\n+ final int maxRetry = SETTING_ALLOCATION_MAX_RETRY.get(indexMetaData.getSettings());\n+ if (allocation.isRetryFailed()) { // manual allocation - retry\n+ // if we are called via the _reroute API we ignore the failure counter and try to allocate\n+ // this improves the usability since people don't need to raise the limits to issue retries since a simple _reroute call is\n+ // enough to manually retry.\n+ return allocation.decision(Decision.YES, NAME, \"shard has already failed allocating [\"\n+ + unassignedInfo.getNumFailedAllocations() + \"] times vs. [\" + maxRetry + \"] retries allowed \"\n+ + unassignedInfo.toString() + \" - retrying once on manual allocation\");\n+ } else if (unassignedInfo.getNumFailedAllocations() >= maxRetry) {\n+ return allocation.decision(Decision.NO, NAME, \"shard has already failed allocating [\"\n+ + unassignedInfo.getNumFailedAllocations() + \"] times vs. 
[\" + maxRetry + \"] retries allowed \"\n+ + unassignedInfo.toString() + \" - manually call [/_cluster/reroute?retry_failed=true] to retry\");\n+ }\n+ }\n+ return allocation.decision(Decision.YES, NAME, \"shard has no previous failures\");\n+ }\n+\n+ @Override\n+ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {\n+ return canAllocate(shardRouting, allocation);\n+ }\n+}", "filename": "core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java", "status": "added" }, { "diff": "@@ -21,6 +21,7 @@\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.cluster.routing.UnassignedInfo;\n import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;\n+import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;\n import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;\n import org.elasticsearch.common.settings.Setting.Property;\n import org.elasticsearch.gateway.PrimaryShardAllocator;\n@@ -40,7 +41,6 @@\n import org.elasticsearch.index.store.FsDirectoryService;\n import org.elasticsearch.index.store.IndexStore;\n import org.elasticsearch.index.store.Store;\n-import org.elasticsearch.index.IndexWarmer;\n import org.elasticsearch.indices.IndicesRequestCache;\n \n import java.util.Arrays;\n@@ -59,6 +59,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {\n public static final Predicate<String> INDEX_SETTINGS_KEY_PREDICATE = (s) -> s.startsWith(IndexMetaData.INDEX_SETTING_PREFIX);\n \n public static final Set<Setting<?>> BUILT_IN_INDEX_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(\n+ MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY,\n IndexSettings.INDEX_TTL_DISABLE_PURGE_SETTING,\n IndexStore.INDEX_STORE_THROTTLE_TYPE_SETTING,\n IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,", "filename": "core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java", "status": "modified" }, { "diff": "@@ -108,7 +108,7 @@ public boolean processExistingRecoveries(RoutingAllocation allocation) {\n currentNode, nodeWithHighestMatch);\n it.moveToUnassigned(new UnassignedInfo(UnassignedInfo.Reason.REALLOCATED_REPLICA,\n \"existing allocation of replica to [\" + currentNode + \"] cancelled, sync id match found on node [\" + nodeWithHighestMatch + \"]\",\n- null, allocation.getCurrentNanoTime(), System.currentTimeMillis()));\n+ null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis()));\n changed = true;\n }\n }", "filename": "core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java", "status": "modified" }, { "diff": "@@ -64,6 +64,7 @@ public RestClusterRerouteAction(Settings settings, RestController controller, Cl\n public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception {\n final ClusterRerouteRequest clusterRerouteRequest = Requests.clusterRerouteRequest();\n clusterRerouteRequest.dryRun(request.paramAsBoolean(\"dry_run\", clusterRerouteRequest.dryRun()));\n+ clusterRerouteRequest.setRetryFailed(request.paramAsBoolean(\"retry_failed\", clusterRerouteRequest.isRetryFailed()));\n clusterRerouteRequest.explain(request.paramAsBoolean(\"explain\", clusterRerouteRequest.explain()));\n clusterRerouteRequest.timeout(request.paramAsTime(\"timeout\", clusterRerouteRequest.timeout()));\n 
clusterRerouteRequest.masterNodeTimeout(request.paramAsTime(\"master_timeout\", clusterRerouteRequest.masterNodeTimeout()));", "filename": "core/src/main/java/org/elasticsearch/rest/action/admin/cluster/reroute/RestClusterRerouteAction.java", "status": "modified" }, { "diff": "@@ -0,0 +1,181 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.elasticsearch.action.admin.cluster.reroute;\n+\n+import org.elasticsearch.Version;\n+import org.elasticsearch.action.ActionListener;\n+import org.elasticsearch.cluster.ClusterState;\n+import org.elasticsearch.cluster.EmptyClusterInfoService;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n+import org.elasticsearch.cluster.metadata.MetaData;\n+import org.elasticsearch.cluster.node.DiscoveryNodes;\n+import org.elasticsearch.cluster.routing.RoutingTable;\n+import org.elasticsearch.cluster.routing.allocation.AllocationService;\n+import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;\n+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;\n+import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;\n+import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand;\n+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;\n+import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;\n+import org.elasticsearch.common.bytes.BytesReference;\n+import org.elasticsearch.common.io.stream.BytesStreamOutput;\n+import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;\n+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;\n+import org.elasticsearch.common.io.stream.StreamInput;\n+import org.elasticsearch.common.network.NetworkModule;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.unit.TimeValue;\n+import org.elasticsearch.test.ESAllocationTestCase;\n+import org.elasticsearch.test.gateway.NoopGatewayAllocator;\n+\n+import java.io.IOException;\n+import java.util.Collections;\n+import java.util.List;\n+import java.util.concurrent.atomic.AtomicReference;\n+\n+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;\n+import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;\n+\n+public class ClusterRerouteTests extends ESAllocationTestCase {\n+\n+ public void testSerializeRequest() throws IOException {\n+ ClusterRerouteRequest req = new ClusterRerouteRequest();\n+ req.setRetryFailed(randomBoolean());\n+ req.dryRun(randomBoolean());\n+ req.explain(randomBoolean());\n+ req.commands(new AllocateEmptyPrimaryAllocationCommand(\"foo\", 1, \"bar\", randomBoolean()));\n+ req.timeout(TimeValue.timeValueMillis(randomIntBetween(0, 
100)));\n+ BytesStreamOutput out = new BytesStreamOutput();\n+ req.writeTo(out);\n+ BytesReference bytes = out.bytes();\n+ NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();\n+ new NetworkModule(null, Settings.EMPTY, true, namedWriteableRegistry);\n+ StreamInput wrap = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes.toBytes()),\n+ namedWriteableRegistry);\n+ ClusterRerouteRequest deserializedReq = new ClusterRerouteRequest();\n+ deserializedReq.readFrom(wrap);\n+\n+ assertEquals(req.isRetryFailed(), deserializedReq.isRetryFailed());\n+ assertEquals(req.dryRun(), deserializedReq.dryRun());\n+ assertEquals(req.explain(), deserializedReq.explain());\n+ assertEquals(req.timeout(), deserializedReq.timeout());\n+ assertEquals(1, deserializedReq.getCommands().commands().size()); // allocation commands have their own tests\n+ assertEquals(req.getCommands().commands().size(), deserializedReq.getCommands().commands().size());\n+ }\n+\n+ public void testClusterStateUpdateTask() {\n+ AllocationService allocationService = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY,\n+ Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),\n+ NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);\n+ ClusterState clusterState = createInitialClusterState(allocationService);\n+ ClusterRerouteRequest req = new ClusterRerouteRequest();\n+ req.dryRun(true);\n+ AtomicReference<ClusterRerouteResponse> responseRef = new AtomicReference<>();\n+ ActionListener<ClusterRerouteResponse> responseActionListener = new ActionListener<ClusterRerouteResponse>() {\n+ @Override\n+ public void onResponse(ClusterRerouteResponse clusterRerouteResponse) {\n+ responseRef.set(clusterRerouteResponse);\n+ }\n+\n+ @Override\n+ public void onFailure(Throwable e) {\n+\n+ }\n+ };\n+ TransportClusterRerouteAction.ClusterRerouteResponseAckedClusterStateUpdateTask task =\n+ new TransportClusterRerouteAction.ClusterRerouteResponseAckedClusterStateUpdateTask(logger, allocationService, req,\n+ responseActionListener );\n+ ClusterState execute = task.execute(clusterState);\n+ assertSame(execute, clusterState); // dry-run\n+ task.onAllNodesAcked(null);\n+ assertNotSame(responseRef.get().getState(), execute);\n+\n+ req.dryRun(false);// now we allocate\n+\n+ final int retries = MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY.get(Settings.EMPTY);\n+ // now fail it N-1 times\n+ for (int i = 0; i < retries; i++) {\n+ ClusterState newState = task.execute(clusterState);\n+ assertNotSame(newState, clusterState); // dry-run=false\n+ clusterState = newState;\n+ RoutingTable routingTable = clusterState.routingTable();\n+ assertEquals(routingTable.index(\"idx\").shards().size(), 1);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).state(), INITIALIZING);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), i);\n+ List<FailedRerouteAllocation.FailedShard> failedShards = Collections.singletonList(\n+ new FailedRerouteAllocation.FailedShard(routingTable.index(\"idx\").shard(0).shards().get(0), \"boom\" + i,\n+ new UnsupportedOperationException()));\n+ RoutingAllocation.Result result = allocationService.applyFailedShards(clusterState, failedShards);\n+ assertTrue(result.changed());\n+ clusterState = ClusterState.builder(clusterState).routingTable(result.routingTable()).build();\n+ routingTable = clusterState.routingTable();\n+ 
assertEquals(routingTable.index(\"idx\").shards().size(), 1);\n+ if (i == retries-1) {\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).state(), UNASSIGNED);\n+ } else {\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).state(), INITIALIZING);\n+ }\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), i+1);\n+ }\n+\n+\n+ // without retry_failed we won't allocate that shard\n+ ClusterState newState = task.execute(clusterState);\n+ assertNotSame(newState, clusterState); // dry-run=false\n+ task.onAllNodesAcked(null);\n+ assertSame(responseRef.get().getState(), newState);\n+ RoutingTable routingTable = clusterState.routingTable();\n+ assertEquals(routingTable.index(\"idx\").shards().size(), 1);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).state(), UNASSIGNED);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), retries);\n+\n+ req.setRetryFailed(true); // now we manually retry and get the shard back into initializing\n+ newState = task.execute(clusterState);\n+ assertNotSame(newState, clusterState); // dry-run=false\n+ clusterState = newState;\n+ routingTable = clusterState.routingTable();\n+ assertEquals(routingTable.index(\"idx\").shards().size(), 1);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).state(), INITIALIZING);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), retries);\n+ }\n+\n+ private ClusterState createInitialClusterState(AllocationService service) {\n+ MetaData.Builder metaBuilder = MetaData.builder();\n+ metaBuilder.put(IndexMetaData.builder(\"idx\").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0));\n+ MetaData metaData = metaBuilder.build();\n+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();\n+ routingTableBuilder.addAsNew(metaData.index(\"idx\"));\n+\n+ RoutingTable routingTable = routingTableBuilder.build();\n+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT)\n+ .metaData(metaData).routingTable(routingTable).build();\n+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode(\"node1\")).put(newNode(\"node2\")))\n+ .build();\n+ RoutingTable prevRoutingTable = routingTable;\n+ routingTable = service.reroute(clusterState, \"reroute\").routingTable();\n+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n+\n+ assertEquals(prevRoutingTable.index(\"idx\").shards().size(), 1);\n+ assertEquals(prevRoutingTable.index(\"idx\").shard(0).shards().get(0).state(), UNASSIGNED);\n+\n+ assertEquals(routingTable.index(\"idx\").shards().size(), 1);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).state(), INITIALIZING);\n+ return clusterState;\n+ }\n+}", "filename": "core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java", "status": "added" }, { "diff": "@@ -64,15 +64,19 @@ public void testReasonOrdinalOrder() {\n UnassignedInfo.Reason.NODE_LEFT,\n UnassignedInfo.Reason.REROUTE_CANCELLED,\n UnassignedInfo.Reason.REINITIALIZED,\n- UnassignedInfo.Reason.REALLOCATED_REPLICA};\n+ UnassignedInfo.Reason.REALLOCATED_REPLICA,\n+ UnassignedInfo.Reason.PRIMARY_FAILED};\n for (int i = 0; i < order.length; i++) {\n assertThat(order[i].ordinal(), equalTo(i));\n }\n assertThat(UnassignedInfo.Reason.values().length, 
equalTo(order.length));\n }\n \n public void testSerialization() throws Exception {\n- UnassignedInfo meta = new UnassignedInfo(RandomPicks.randomFrom(random(), UnassignedInfo.Reason.values()), randomBoolean() ? randomAsciiOfLength(4) : null);\n+ UnassignedInfo.Reason reason = RandomPicks.randomFrom(random(), UnassignedInfo.Reason.values());\n+ UnassignedInfo meta = reason == UnassignedInfo.Reason.ALLOCATION_FAILED ?\n+ new UnassignedInfo(reason, randomBoolean() ? randomAsciiOfLength(4) : null, null, randomIntBetween(1, 100), System.nanoTime(), System.currentTimeMillis()):\n+ new UnassignedInfo(reason, randomBoolean() ? randomAsciiOfLength(4) : null);\n BytesStreamOutput out = new BytesStreamOutput();\n meta.writeTo(out);\n out.close();\n@@ -82,6 +86,7 @@ public void testSerialization() throws Exception {\n assertThat(read.getUnassignedTimeInMillis(), equalTo(meta.getUnassignedTimeInMillis()));\n assertThat(read.getMessage(), equalTo(meta.getMessage()));\n assertThat(read.getDetails(), equalTo(meta.getDetails()));\n+ assertThat(read.getNumFailedAllocations(), equalTo(meta.getNumFailedAllocations()));\n }\n \n public void testIndexCreated() {\n@@ -273,7 +278,10 @@ public void testUnassignedDelayedOnlyOnNodeLeft() throws Exception {\n public void testUnassignedDelayOnlyNodeLeftNonNodeLeftReason() throws Exception {\n EnumSet<UnassignedInfo.Reason> reasons = EnumSet.allOf(UnassignedInfo.Reason.class);\n reasons.remove(UnassignedInfo.Reason.NODE_LEFT);\n- UnassignedInfo unassignedInfo = new UnassignedInfo(RandomPicks.randomFrom(random(), reasons), null);\n+ UnassignedInfo.Reason reason = RandomPicks.randomFrom(random(), reasons);\n+ UnassignedInfo unassignedInfo = reason == UnassignedInfo.Reason.ALLOCATION_FAILED ?\n+ new UnassignedInfo(reason, null, null, 1, System.nanoTime(), System.currentTimeMillis()):\n+ new UnassignedInfo(reason, null);\n unassignedInfo = unassignedInfo.updateDelay(unassignedInfo.getUnassignedTimeInNanos() + 1, // add 1 tick delay\n Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), \"10h\").build(), Settings.EMPTY);\n long delay = unassignedInfo.getLastComputedLeftDelayNanos();\n@@ -287,7 +295,7 @@ public void testUnassignedDelayOnlyNodeLeftNonNodeLeftReason() throws Exception\n */\n public void testLeftDelayCalculation() throws Exception {\n final long baseTime = System.nanoTime();\n- UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, \"test\", null, baseTime, System.currentTimeMillis());\n+ UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, \"test\", null, 0, baseTime, System.currentTimeMillis());\n final long totalDelayNanos = TimeValue.timeValueMillis(10).nanos();\n final Settings settings = Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueNanos(totalDelayNanos)).build();\n unassignedInfo = unassignedInfo.updateDelay(baseTime, settings, Settings.EMPTY);", "filename": "core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java", "status": "modified" }, { "diff": "@@ -94,7 +94,7 @@ public void testMoveShardCommand() {\n } else {\n toNodeId = \"node1\";\n }\n- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(\"test\", 0, existingNodeId, toNodeId)));\n+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(\"test\", 0, existingNodeId, toNodeId)), false, false);\n assertThat(rerouteResult.changed(), 
equalTo(true));\n clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();\n assertThat(clusterState.getRoutingNodes().node(existingNodeId).iterator().next().state(), equalTo(ShardRoutingState.RELOCATING));\n@@ -148,54 +148,54 @@ public void testAllocateCommand() {\n \n logger.info(\"--> allocating to non-existent node, should fail\");\n try {\n- allocation.reroute(clusterState, new AllocationCommands(randomAllocateCommand(index, shardId.id(), \"node42\")));\n+ allocation.reroute(clusterState, new AllocationCommands(randomAllocateCommand(index, shardId.id(), \"node42\")), false, false);\n fail(\"expected IllegalArgumentException when allocating to non-existing node\");\n } catch (IllegalArgumentException e) {\n assertThat(e.getMessage(), containsString(\"failed to resolve [node42], no matching nodes\"));\n }\n \n logger.info(\"--> allocating to non-data node, should fail\");\n try {\n- allocation.reroute(clusterState, new AllocationCommands(randomAllocateCommand(index, shardId.id(), \"node4\")));\n+ allocation.reroute(clusterState, new AllocationCommands(randomAllocateCommand(index, shardId.id(), \"node4\")), false, false);\n fail(\"expected IllegalArgumentException when allocating to non-data node\");\n } catch (IllegalArgumentException e) {\n assertThat(e.getMessage(), containsString(\"allocation can only be done on data nodes\"));\n }\n \n logger.info(\"--> allocating non-existing shard, should fail\");\n try {\n- allocation.reroute(clusterState, new AllocationCommands(randomAllocateCommand(\"test\", 1, \"node2\")));\n+ allocation.reroute(clusterState, new AllocationCommands(randomAllocateCommand(\"test\", 1, \"node2\")), false, false);\n fail(\"expected ShardNotFoundException when allocating non-existing shard\");\n } catch (ShardNotFoundException e) {\n assertThat(e.getMessage(), containsString(\"no such shard\"));\n }\n \n logger.info(\"--> allocating non-existing index, should fail\");\n try {\n- allocation.reroute(clusterState, new AllocationCommands(randomAllocateCommand(\"test2\", 0, \"node2\")));\n+ allocation.reroute(clusterState, new AllocationCommands(randomAllocateCommand(\"test2\", 0, \"node2\")), false, false);\n fail(\"expected ShardNotFoundException when allocating non-existing index\");\n } catch (IndexNotFoundException e) {\n assertThat(e.getMessage(), containsString(\"no such index\"));\n }\n \n logger.info(\"--> allocating empty primary with acceptDataLoss flag set to false\");\n try {\n- allocation.reroute(clusterState, new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand(\"test\", 0, \"node1\", false)));\n+ allocation.reroute(clusterState, new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand(\"test\", 0, \"node1\", false)), false, false);\n fail(\"expected IllegalArgumentException when allocating empty primary with acceptDataLoss flag set to false\");\n } catch (IllegalArgumentException e) {\n assertThat(e.getMessage(), containsString(\"allocating an empty primary for \" + shardId + \" can result in data loss. 
Please confirm by setting the accept_data_loss parameter to true\"));\n }\n \n logger.info(\"--> allocating stale primary with acceptDataLoss flag set to false\");\n try {\n- allocation.reroute(clusterState, new AllocationCommands(new AllocateStalePrimaryAllocationCommand(index, shardId.id(), \"node1\", false)));\n+ allocation.reroute(clusterState, new AllocationCommands(new AllocateStalePrimaryAllocationCommand(index, shardId.id(), \"node1\", false)), false, false);\n fail(\"expected IllegalArgumentException when allocating stale primary with acceptDataLoss flag set to false\");\n } catch (IllegalArgumentException e) {\n assertThat(e.getMessage(), containsString(\"allocating an empty primary for \" + shardId + \" can result in data loss. Please confirm by setting the accept_data_loss parameter to true\"));\n }\n \n logger.info(\"--> allocating empty primary with acceptDataLoss flag set to true\");\n- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand(\"test\", 0, \"node1\", true)));\n+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand(\"test\", 0, \"node1\", true)), false, false);\n assertThat(rerouteResult.changed(), equalTo(true));\n clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();\n assertThat(clusterState.getRoutingNodes().node(\"node1\").size(), equalTo(1));\n@@ -211,13 +211,13 @@ public void testAllocateCommand() {\n \n logger.info(\"--> allocate the replica shard on the primary shard node, should fail\");\n try {\n- allocation.reroute(clusterState, new AllocationCommands(new AllocateReplicaAllocationCommand(\"test\", 0, \"node1\")));\n+ allocation.reroute(clusterState, new AllocationCommands(new AllocateReplicaAllocationCommand(\"test\", 0, \"node1\")), false, false);\n fail(\"expected IllegalArgumentException when allocating replica shard on the primary shard node\");\n } catch (IllegalArgumentException e) {\n }\n \n logger.info(\"--> allocate the replica shard on on the second node\");\n- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateReplicaAllocationCommand(\"test\", 0, \"node2\")));\n+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateReplicaAllocationCommand(\"test\", 0, \"node2\")), false, false);\n assertThat(rerouteResult.changed(), equalTo(true));\n clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();\n assertThat(clusterState.getRoutingNodes().node(\"node1\").size(), equalTo(1));\n@@ -236,7 +236,7 @@ public void testAllocateCommand() {\n \n logger.info(\"--> verify that we fail when there are no unassigned shards\");\n try {\n- allocation.reroute(clusterState, new AllocationCommands(randomAllocateCommand(\"test\", 0, \"node3\")));\n+ allocation.reroute(clusterState, new AllocationCommands(randomAllocateCommand(\"test\", 0, \"node3\")), false, false);\n fail(\"expected IllegalArgumentException when allocating shard while no unassigned shard available\");\n } catch (IllegalArgumentException e) {\n }\n@@ -268,7 +268,7 @@ public void testCancelCommand() {\n assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));\n \n logger.info(\"--> allocating empty primary shard with accept_data_loss flag set to true\");\n- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand(\"test\", 0, 
\"node1\", true)));\n+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand(\"test\", 0, \"node1\", true)), false, false);\n assertThat(rerouteResult.changed(), equalTo(true));\n clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();\n assertThat(clusterState.getRoutingNodes().node(\"node1\").size(), equalTo(1));\n@@ -277,7 +277,7 @@ public void testCancelCommand() {\n \n logger.info(\"--> cancel primary allocation, make sure it fails...\");\n try {\n- allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(\"test\", 0, \"node1\", false)));\n+ allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(\"test\", 0, \"node1\", false)), false, false);\n fail();\n } catch (IllegalArgumentException e) {\n }\n@@ -291,13 +291,13 @@ public void testCancelCommand() {\n \n logger.info(\"--> cancel primary allocation, make sure it fails...\");\n try {\n- allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(\"test\", 0, \"node1\", false)));\n+ allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(\"test\", 0, \"node1\", false)), false, false);\n fail();\n } catch (IllegalArgumentException e) {\n }\n \n logger.info(\"--> allocate the replica shard on on the second node\");\n- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateReplicaAllocationCommand(\"test\", 0, \"node2\")));\n+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateReplicaAllocationCommand(\"test\", 0, \"node2\")), false, false);\n assertThat(rerouteResult.changed(), equalTo(true));\n clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();\n assertThat(clusterState.getRoutingNodes().node(\"node1\").size(), equalTo(1));\n@@ -306,7 +306,7 @@ public void testCancelCommand() {\n assertThat(clusterState.getRoutingNodes().node(\"node2\").shardsWithState(INITIALIZING).size(), equalTo(1));\n \n logger.info(\"--> cancel the relocation allocation\");\n- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(\"test\", 0, \"node2\", false)));\n+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(\"test\", 0, \"node2\", false)), false, false);\n assertThat(rerouteResult.changed(), equalTo(true));\n clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();\n assertThat(clusterState.getRoutingNodes().node(\"node1\").size(), equalTo(1));\n@@ -315,7 +315,7 @@ public void testCancelCommand() {\n assertThat(clusterState.getRoutingNodes().node(\"node3\").size(), equalTo(0));\n \n logger.info(\"--> allocate the replica shard on on the second node\");\n- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateReplicaAllocationCommand(\"test\", 0, \"node2\")));\n+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateReplicaAllocationCommand(\"test\", 0, \"node2\")), false, false);\n assertThat(rerouteResult.changed(), equalTo(true));\n clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();\n assertThat(clusterState.getRoutingNodes().node(\"node1\").size(), equalTo(1));\n@@ -325,7 +325,7 @@ public void testCancelCommand() {\n \n logger.info(\"--> cancel the primary being replicated, make sure 
it fails\");\n try {\n- allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(\"test\", 0, \"node1\", false)));\n+ allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(\"test\", 0, \"node1\", false)), false, false);\n fail();\n } catch (IllegalArgumentException e) {\n }\n@@ -339,7 +339,7 @@ public void testCancelCommand() {\n assertThat(clusterState.getRoutingNodes().node(\"node2\").shardsWithState(STARTED).size(), equalTo(1));\n \n logger.info(\"--> cancel allocation of the replica shard\");\n- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(\"test\", 0, \"node2\", false)));\n+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(\"test\", 0, \"node2\", false)), false, false);\n assertThat(rerouteResult.changed(), equalTo(true));\n clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();\n assertThat(clusterState.getRoutingNodes().node(\"node1\").size(), equalTo(1));\n@@ -348,7 +348,7 @@ public void testCancelCommand() {\n assertThat(clusterState.getRoutingNodes().node(\"node3\").size(), equalTo(0));\n \n logger.info(\"--> allocate the replica shard on on the second node\");\n- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateReplicaAllocationCommand(\"test\", 0, \"node2\")));\n+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateReplicaAllocationCommand(\"test\", 0, \"node2\")), false, false);\n clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();\n assertThat(rerouteResult.changed(), equalTo(true));\n assertThat(clusterState.getRoutingNodes().node(\"node1\").size(), equalTo(1));\n@@ -364,7 +364,7 @@ public void testCancelCommand() {\n assertThat(clusterState.getRoutingNodes().node(\"node2\").shardsWithState(STARTED).size(), equalTo(1));\n \n logger.info(\"--> move the replica shard\");\n- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(\"test\", 0, \"node2\", \"node3\")));\n+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(\"test\", 0, \"node2\", \"node3\")), false, false);\n clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();\n assertThat(clusterState.getRoutingNodes().node(\"node1\").size(), equalTo(1));\n assertThat(clusterState.getRoutingNodes().node(\"node1\").shardsWithState(STARTED).size(), equalTo(1));\n@@ -374,7 +374,7 @@ public void testCancelCommand() {\n assertThat(clusterState.getRoutingNodes().node(\"node3\").shardsWithState(INITIALIZING).size(), equalTo(1));\n \n logger.info(\"--> cancel the move of the replica shard\");\n- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(\"test\", 0, \"node3\", false)));\n+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(\"test\", 0, \"node3\", false)), false, false);\n clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();\n assertThat(clusterState.getRoutingNodes().node(\"node1\").size(), equalTo(1));\n assertThat(clusterState.getRoutingNodes().node(\"node1\").shardsWithState(STARTED).size(), equalTo(1));\n@@ -383,7 +383,7 @@ public void testCancelCommand() {\n \n \n logger.info(\"--> cancel the primary allocation (with 
allow_primary set to true)\");\n- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(\"test\", 0, \"node1\", true)));\n+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(\"test\", 0, \"node1\", true)), false, false);\n clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();\n assertThat(rerouteResult.changed(), equalTo(true));\n logger.error(clusterState.prettyPrint());", "filename": "core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java", "status": "modified" }, { "diff": "@@ -868,7 +868,7 @@ public void testUnassignedShardsWithUnbalancedZones() {\n }\n commands.add(new MoveAllocationCommand(\"test\", 0, primaryNode, \"A-4\"));\n \n- routingTable = strategy.reroute(clusterState, commands).routingTable();\n+ routingTable = strategy.reroute(clusterState, commands, false, false).routingTable();\n clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n \n assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(0));", "filename": "core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java", "status": "modified" }, { "diff": "@@ -149,8 +149,8 @@ public void testDeadNodeWhileRelocatingOnToNode() {\n \n logger.info(\"--> moving primary shard to node3\");\n rerouteResult = allocation.reroute(clusterState, new AllocationCommands(\n- new MoveAllocationCommand(\"test\", 0, clusterState.routingTable().index(\"test\").shard(0).primaryShard().currentNodeId(), \"node3\"))\n- );\n+ new MoveAllocationCommand(\"test\", 0, clusterState.routingTable().index(\"test\").shard(0).primaryShard().currentNodeId(), \"node3\")),\n+ false, false);\n assertThat(rerouteResult.changed(), equalTo(true));\n clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();\n assertThat(clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next().state(), equalTo(RELOCATING));\n@@ -223,8 +223,8 @@ public void testDeadNodeWhileRelocatingOnFromNode() {\n \n logger.info(\"--> moving primary shard to node3\");\n rerouteResult = allocation.reroute(clusterState, new AllocationCommands(\n- new MoveAllocationCommand(\"test\",0 , clusterState.routingTable().index(\"test\").shard(0).primaryShard().currentNodeId(), \"node3\"))\n- );\n+ new MoveAllocationCommand(\"test\",0 , clusterState.routingTable().index(\"test\").shard(0).primaryShard().currentNodeId(), \"node3\")),\n+ false, false);\n assertThat(rerouteResult.changed(), equalTo(true));\n clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();\n assertThat(clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next().state(), equalTo(RELOCATING));", "filename": "core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java", "status": "modified" }, { "diff": "@@ -149,7 +149,7 @@ public void addListener(Listener listener) {\n } else {\n toNodeId = \"node1\";\n }\n- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(\"test\", 0, existingNodeId, toNodeId)));\n+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(\"test\", 0, existingNodeId, toNodeId)), false, false);\n assertThat(rerouteResult.changed(), equalTo(true));\n clusterState = 
ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();\n assertEquals(clusterState.getRoutingNodes().node(existingNodeId).iterator().next().state(), ShardRoutingState.RELOCATING);", "filename": "core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java", "status": "modified" }, { "diff": "@@ -109,8 +109,8 @@ public void testFailedShardPrimaryRelocatingToAndFrom() {\n \n logger.info(\"--> moving primary shard to node3\");\n rerouteResult = allocation.reroute(clusterState, new AllocationCommands(\n- new MoveAllocationCommand(\"test\", 0, clusterState.routingTable().index(\"test\").shard(0).primaryShard().currentNodeId(), \"node3\"))\n- );\n+ new MoveAllocationCommand(\"test\", 0, clusterState.routingTable().index(\"test\").shard(0).primaryShard().currentNodeId(), \"node3\")),\n+ false, false);\n assertThat(rerouteResult.changed(), equalTo(true));\n clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();\n assertThat(clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next().state(), equalTo(RELOCATING));\n@@ -125,8 +125,8 @@ public void testFailedShardPrimaryRelocatingToAndFrom() {\n \n logger.info(\"--> moving primary shard to node3\");\n rerouteResult = allocation.reroute(clusterState, new AllocationCommands(\n- new MoveAllocationCommand(\"test\", 0, clusterState.routingTable().index(\"test\").shard(0).primaryShard().currentNodeId(), \"node3\"))\n- );\n+ new MoveAllocationCommand(\"test\", 0, clusterState.routingTable().index(\"test\").shard(0).primaryShard().currentNodeId(), \"node3\")),\n+ false, false);\n assertThat(rerouteResult.changed(), equalTo(true));\n clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();\n assertThat(clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next().state(), equalTo(RELOCATING));", "filename": "core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java", "status": "modified" }, { "diff": "@@ -0,0 +1,210 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.cluster.routing.allocation;\n+\n+import org.elasticsearch.Version;\n+import org.elasticsearch.cluster.ClusterState;\n+import org.elasticsearch.cluster.EmptyClusterInfoService;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n+import org.elasticsearch.cluster.metadata.MetaData;\n+import org.elasticsearch.cluster.node.DiscoveryNodes;\n+import org.elasticsearch.cluster.routing.RoutingTable;\n+import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;\n+import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand;\n+import org.elasticsearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand;\n+import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;\n+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;\n+import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.test.ESAllocationTestCase;\n+import org.elasticsearch.test.gateway.NoopGatewayAllocator;\n+\n+import java.util.Collections;\n+import java.util.List;\n+\n+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;\n+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;\n+import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;\n+\n+public class MaxRetryAllocationDeciderTests extends ESAllocationTestCase {\n+\n+ private AllocationService strategy;\n+\n+ @Override\n+ public void setUp() throws Exception {\n+ super.setUp();\n+ strategy = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY,\n+ Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),\n+ NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);\n+ }\n+\n+ private ClusterState createInitialClusterState() {\n+ MetaData.Builder metaBuilder = MetaData.builder();\n+ metaBuilder.put(IndexMetaData.builder(\"idx\").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0));\n+ MetaData metaData = metaBuilder.build();\n+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();\n+ routingTableBuilder.addAsNew(metaData.index(\"idx\"));\n+\n+ RoutingTable routingTable = routingTableBuilder.build();\n+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT)\n+ .metaData(metaData).routingTable(routingTable).build();\n+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode(\"node1\")).put(newNode(\"node2\")))\n+ .build();\n+ RoutingTable prevRoutingTable = routingTable;\n+ routingTable = strategy.reroute(clusterState, \"reroute\", false).routingTable();\n+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n+\n+ assertEquals(prevRoutingTable.index(\"idx\").shards().size(), 1);\n+ assertEquals(prevRoutingTable.index(\"idx\").shard(0).shards().get(0).state(), UNASSIGNED);\n+\n+ assertEquals(routingTable.index(\"idx\").shards().size(), 1);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).state(), INITIALIZING);\n+ return clusterState;\n+ }\n+\n+ public void testSingleRetryOnIgnore() {\n+ ClusterState clusterState = createInitialClusterState();\n+ RoutingTable routingTable = 
clusterState.routingTable();\n+ final int retries = MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY.get(Settings.EMPTY);\n+ // now fail it N-1 times\n+ for (int i = 0; i < retries-1; i++) {\n+ List<FailedRerouteAllocation.FailedShard> failedShards = Collections.singletonList(\n+ new FailedRerouteAllocation.FailedShard(routingTable.index(\"idx\").shard(0).shards().get(0), \"boom\" + i,\n+ new UnsupportedOperationException()));\n+ RoutingAllocation.Result result = strategy.applyFailedShards(clusterState, failedShards);\n+ assertTrue(result.changed());\n+ routingTable = result.routingTable();\n+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n+ assertEquals(routingTable.index(\"idx\").shards().size(), 1);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).state(), INITIALIZING);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), i+1);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo().getMessage(), \"boom\" + i);\n+ }\n+ // now we go and check that we are actually stick to unassigned on the next failure\n+ List<FailedRerouteAllocation.FailedShard> failedShards = Collections.singletonList(\n+ new FailedRerouteAllocation.FailedShard(routingTable.index(\"idx\").shard(0).shards().get(0), \"boom\",\n+ new UnsupportedOperationException()));\n+ RoutingAllocation.Result result = strategy.applyFailedShards(clusterState, failedShards);\n+ assertTrue(result.changed());\n+ routingTable = result.routingTable();\n+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n+ assertEquals(routingTable.index(\"idx\").shards().size(), 1);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), retries);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).state(), UNASSIGNED);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo().getMessage(), \"boom\");\n+\n+ result = strategy.reroute(clusterState, new AllocationCommands(), false, true); // manual reroute should retry once\n+ assertTrue(result.changed());\n+ routingTable = result.routingTable();\n+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n+ assertEquals(routingTable.index(\"idx\").shards().size(), 1);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), retries);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).state(), INITIALIZING);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo().getMessage(), \"boom\");\n+\n+ // now we go and check that we are actually stick to unassigned on the next failure ie. 
no retry\n+ failedShards = Collections.singletonList(\n+ new FailedRerouteAllocation.FailedShard(routingTable.index(\"idx\").shard(0).shards().get(0), \"boom\",\n+ new UnsupportedOperationException()));\n+ result = strategy.applyFailedShards(clusterState, failedShards);\n+ assertTrue(result.changed());\n+ routingTable = result.routingTable();\n+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n+ assertEquals(routingTable.index(\"idx\").shards().size(), 1);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), retries+1);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).state(), UNASSIGNED);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo().getMessage(), \"boom\");\n+\n+ }\n+\n+ public void testFailedAllocation() {\n+ ClusterState clusterState = createInitialClusterState();\n+ RoutingTable routingTable = clusterState.routingTable();\n+ final int retries = MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY.get(Settings.EMPTY);\n+ // now fail it N-1 times\n+ for (int i = 0; i < retries-1; i++) {\n+ List<FailedRerouteAllocation.FailedShard> failedShards = Collections.singletonList(\n+ new FailedRerouteAllocation.FailedShard(routingTable.index(\"idx\").shard(0).shards().get(0), \"boom\" + i,\n+ new UnsupportedOperationException()));\n+ RoutingAllocation.Result result = strategy.applyFailedShards(clusterState, failedShards);\n+ assertTrue(result.changed());\n+ routingTable = result.routingTable();\n+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n+ assertEquals(routingTable.index(\"idx\").shards().size(), 1);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).state(), INITIALIZING);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), i+1);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo().getMessage(), \"boom\" + i);\n+ }\n+ // now we go and check that we are actually stick to unassigned on the next failure\n+ {\n+ List<FailedRerouteAllocation.FailedShard> failedShards = Collections.singletonList(\n+ new FailedRerouteAllocation.FailedShard(routingTable.index(\"idx\").shard(0).shards().get(0), \"boom\",\n+ new UnsupportedOperationException()));\n+ RoutingAllocation.Result result = strategy.applyFailedShards(clusterState, failedShards);\n+ assertTrue(result.changed());\n+ routingTable = result.routingTable();\n+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n+ assertEquals(routingTable.index(\"idx\").shards().size(), 1);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), retries);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).state(), UNASSIGNED);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo().getMessage(), \"boom\");\n+ }\n+\n+ // change the settings and ensure we can do another round of allocation for that index.\n+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable)\n+ .metaData(MetaData.builder(clusterState.metaData())\n+ .put(IndexMetaData.builder(clusterState.metaData().index(\"idx\")).settings(\n+ Settings.builder().put(clusterState.metaData().index(\"idx\").getSettings()).put(\"index.allocation.max_retries\",\n+ retries+1).build()\n+ ).build(), true).build()).build();\n+ RoutingAllocation.Result 
result = strategy.reroute(clusterState, \"settings changed\", false);\n+ assertTrue(result.changed());\n+ routingTable = result.routingTable();\n+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n+ // good we are initializing and we are maintaining failure information\n+ assertEquals(routingTable.index(\"idx\").shards().size(), 1);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), retries);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).state(), INITIALIZING);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo().getMessage(), \"boom\");\n+\n+ // now we start the shard\n+ routingTable = strategy.applyStartedShards(clusterState, Collections.singletonList(routingTable.index(\"idx\")\n+ .shard(0).shards().get(0))).routingTable();\n+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n+\n+ // all counters have been reset to 0 ie. no unassigned info\n+ assertEquals(routingTable.index(\"idx\").shards().size(), 1);\n+ assertNull(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo());\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).state(), STARTED);\n+\n+ // now fail again and see if it has a new counter\n+ List<FailedRerouteAllocation.FailedShard> failedShards = Collections.singletonList(\n+ new FailedRerouteAllocation.FailedShard(routingTable.index(\"idx\").shard(0).shards().get(0), \"ZOOOMG\",\n+ new UnsupportedOperationException()));\n+ result = strategy.applyFailedShards(clusterState, failedShards);\n+ assertTrue(result.changed());\n+ routingTable = result.routingTable();\n+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n+ assertEquals(routingTable.index(\"idx\").shards().size(), 1);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), 1);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).state(), INITIALIZING);\n+ assertEquals(routingTable.index(\"idx\").shard(0).shards().get(0).unassignedInfo().getMessage(), \"ZOOOMG\");\n+ }\n+}", "filename": "core/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java", "status": "added" }, { "diff": "@@ -337,7 +337,7 @@ public void testRebalanceDoesNotAllocatePrimaryAndReplicasOnDifferentVersionNode\n AllocationService strategy = new MockAllocationService(Settings.EMPTY,\n allocationDeciders,\n NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);\n- RoutingAllocation.Result result = strategy.reroute(state, new AllocationCommands(), true);\n+ RoutingAllocation.Result result = strategy.reroute(state, new AllocationCommands(), true, false);\n // the two indices must stay as is, the replicas cannot move to oldNode2 because versions don't match\n state = ClusterState.builder(state).routingResult(result).build();\n assertThat(result.routingTable().index(shard2.getIndex()).shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(0));\n@@ -369,7 +369,7 @@ public void testRestoreDoesNotAllocateSnapshotOnOlderNodes() {\n AllocationService strategy = new MockAllocationService(Settings.EMPTY,\n allocationDeciders,\n NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);\n- RoutingAllocation.Result result = strategy.reroute(state, new AllocationCommands(), true);\n+ 
RoutingAllocation.Result result = strategy.reroute(state, new AllocationCommands(), true, false);\n \n // Make sure that primary shards are only allocated on the new node\n for (int i = 0; i < numberOfShards; i++) {", "filename": "core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java", "status": "modified" }, { "diff": "@@ -283,7 +283,7 @@ public void testOutgoingThrottlesAllocaiton() {\n assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node2\"), 0);\n assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node3\"), 0);\n \n- RoutingAllocation.Result reroute = strategy.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(\"test\", clusterState.getRoutingNodes().node(\"node1\").iterator().next().shardId().id(), \"node1\", \"node2\")));\n+ RoutingAllocation.Result reroute = strategy.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(\"test\", clusterState.getRoutingNodes().node(\"node1\").iterator().next().shardId().id(), \"node1\", \"node2\")), false, false);\n assertEquals(reroute.explanations().explanations().size(), 1);\n assertEquals(reroute.explanations().explanations().get(0).decisions().type(), Decision.Type.YES);\n routingTable = reroute.routingTable();\n@@ -296,7 +296,7 @@ public void testOutgoingThrottlesAllocaiton() {\n assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries(\"node3\"), 0);\n \n // outgoing throttles\n- reroute = strategy.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(\"test\", clusterState.getRoutingNodes().node(\"node3\").iterator().next().shardId().id(), \"node3\", \"node1\")), true);\n+ reroute = strategy.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(\"test\", clusterState.getRoutingNodes().node(\"node3\").iterator().next().shardId().id(), \"node3\", \"node1\")), true, false);\n assertEquals(reroute.explanations().explanations().size(), 1);\n assertEquals(reroute.explanations().explanations().get(0).decisions().type(), Decision.Type.THROTTLE);\n assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries(\"node1\"), 0);\n@@ -311,7 +311,7 @@ public void testOutgoingThrottlesAllocaiton() {\n assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0));\n \n // incoming throttles\n- reroute = strategy.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(\"test\", clusterState.getRoutingNodes().node(\"node3\").iterator().next().shardId().id(), \"node3\", \"node2\")), true);\n+ reroute = strategy.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(\"test\", clusterState.getRoutingNodes().node(\"node3\").iterator().next().shardId().id(), \"node3\", \"node2\")), true, false);\n assertEquals(reroute.explanations().explanations().size(), 1);\n assertEquals(reroute.explanations().explanations().get(0).decisions().type(), Decision.Type.THROTTLE);\n ", "filename": "core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java", "status": "modified" }, { "diff": "@@ -796,7 +796,7 @@ public void addListener(Listener listener) {\n AllocationCommand relocate1 = new MoveAllocationCommand(\"test\", 0, \"node2\", \"node3\");\n AllocationCommands cmds = new AllocationCommands(relocate1);\n \n- routingTable = strategy.reroute(clusterState, cmds).routingTable();\n+ routingTable = strategy.reroute(clusterState, cmds, false, false).routingTable();\n clusterState = 
ClusterState.builder(clusterState).routingTable(routingTable).build();\n logShardStates(clusterState);\n \n@@ -808,7 +808,7 @@ public void addListener(Listener listener) {\n // node3, which will put it over the low watermark when it\n // completes, with shard relocations taken into account this should\n // throw an exception about not being able to complete\n- strategy.reroute(clusterState, cmds).routingTable();\n+ strategy.reroute(clusterState, cmds, false, false).routingTable();\n fail(\"should not have been able to reroute the shard\");\n } catch (IllegalArgumentException e) {\n assertThat(\"can't allocated because there isn't enough room: \" + e.getMessage(),\n@@ -876,7 +876,7 @@ public void testCanRemainWithShardRelocatingAway() {\n );\n ClusterState clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();\n RoutingAllocation routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo,\n- System.nanoTime());\n+ System.nanoTime(), false);\n Decision decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);\n assertThat(decision.type(), equalTo(Decision.Type.NO));\n \n@@ -896,7 +896,8 @@ public void testCanRemainWithShardRelocatingAway() {\n )\n );\n clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();\n- routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, System.nanoTime());\n+ routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, System.nanoTime(),\n+ false);\n decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);\n assertThat(decision.type(), equalTo(Decision.Type.YES));\n \n@@ -992,7 +993,7 @@ public void testForSingleDataNode() {\n );\n ClusterState clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();\n RoutingAllocation routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo,\n- System.nanoTime());\n+ System.nanoTime(), false);\n Decision decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);\n \n // Two shards should start happily\n@@ -1051,7 +1052,8 @@ public void addListener(Listener listener) {\n );\n \n clusterState = ClusterState.builder(updateClusterState).routingTable(builder.build()).build();\n- routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, System.nanoTime());\n+ routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, System.nanoTime(),\n+ false);\n decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);\n assertThat(decision.type(), equalTo(Decision.Type.YES));\n ", "filename": "core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java", "status": "modified" }, { "diff": "@@ -136,7 +136,7 @@ public void testCanAllocateUsesMaxAvailableSpace() {\n ImmutableOpenMap.Builder<String, Long> shardSizes = ImmutableOpenMap.builder();\n shardSizes.put(\"[test][0][p]\", 10L); // 10 bytes\n final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(), shardSizes.build(), ImmutableOpenMap.of());\n- RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{decider}), 
clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime());\n+ RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{decider}), clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime(), false);\n assertEquals(mostAvailableUsage.toString(), Decision.YES, decider.canAllocate(test_0, new RoutingNode(\"node_0\", node_0), allocation));\n assertEquals(mostAvailableUsage.toString(), Decision.NO, decider.canAllocate(test_0, new RoutingNode(\"node_1\", node_1), allocation));\n }\n@@ -204,7 +204,7 @@ public void testCanRemainUsesLeastAvailableSpace() {\n shardSizes.put(\"[test][2][p]\", 10L);\n \n final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(), shardSizes.build(), shardRoutingMap.build());\n- RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{decider}), clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime());\n+ RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{decider}), clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime(), false);\n assertEquals(Decision.YES, decider.canRemain(test_0, new RoutingNode(\"node_0\", node_0), allocation));\n assertEquals(Decision.NO, decider.canRemain(test_1, new RoutingNode(\"node_1\", node_1), allocation));\n try {", "filename": "core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java", "status": "modified" }, { "diff": "@@ -346,7 +346,7 @@ private RoutingAllocation getRestoreRoutingAllocation(AllocationDeciders allocat\n .metaData(metaData)\n .routingTable(routingTable)\n .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();\n- return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime());\n+ return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime(), false);\n }\n \n /**\n@@ -425,7 +425,7 @@ private RoutingAllocation getRecoverOnAnyNodeRoutingAllocation(AllocationDecider\n .metaData(metaData)\n .routingTable(routingTable)\n .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();\n- return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime());\n+ return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime(), false);\n }\n \n /**\n@@ -444,23 +444,23 @@ public void testEnoughCopiesFoundForAllocationOnLegacyIndex() {\n .routingTable(routingTable)\n .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();\n \n- RoutingAllocation allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime());\n+ RoutingAllocation allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime(), false);\n boolean changed = testAllocator.allocateUnassigned(allocation);\n assertThat(changed, equalTo(false));\n assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));\n assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));\n assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas\n \n testAllocator.addData(node1, 
1, null, randomBoolean());\n- allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime());\n+ allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime(), false);\n changed = testAllocator.allocateUnassigned(allocation);\n assertThat(changed, equalTo(false));\n assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));\n assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));\n assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas\n \n testAllocator.addData(node2, 1, null, randomBoolean());\n- allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime());\n+ allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime(), false);\n changed = testAllocator.allocateUnassigned(allocation);\n assertThat(changed, equalTo(true));\n assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(0));\n@@ -485,23 +485,23 @@ public void testEnoughCopiesFoundForAllocationOnLegacyIndexWithDifferentVersion(\n .routingTable(routingTable)\n .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();\n \n- RoutingAllocation allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime());\n+ RoutingAllocation allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime(), false);\n boolean changed = testAllocator.allocateUnassigned(allocation);\n assertThat(changed, equalTo(false));\n assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));\n assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));\n assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas\n \n testAllocator.addData(node1, 1, null, randomBoolean());\n- allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime());\n+ allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime(), false);\n changed = testAllocator.allocateUnassigned(allocation);\n assertThat(changed, equalTo(false));\n assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));\n assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));\n assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas\n \n testAllocator.addData(node2, 2, null, randomBoolean());\n- allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime());\n+ allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime(), false);\n changed = testAllocator.allocateUnassigned(allocation);\n assertThat(changed, equalTo(true));\n assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(0));\n@@ -525,7 +525,7 @@ private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas(AllocationDe\n .metaData(metaData)\n .routingTable(routingTableBuilder.build())\n 
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();\n- return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, null, System.nanoTime());\n+ return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, null, System.nanoTime(), false);\n }\n \n class TestAllocator extends PrimaryShardAllocator {", "filename": "core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java", "status": "modified" }, { "diff": "@@ -302,7 +302,7 @@ private RoutingAllocation onePrimaryOnNode1And1Replica(AllocationDeciders decide\n .metaData(metaData)\n .routingTable(routingTable)\n .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();\n- return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime());\n+ return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime(), false);\n }\n \n private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders) {\n@@ -324,7 +324,7 @@ private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDecid\n .metaData(metaData)\n .routingTable(routingTable)\n .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();\n- return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime());\n+ return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime(), false);\n }\n \n class TestAllocator extends ReplicaShardAllocator {", "filename": "core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java", "status": "modified" }, { "diff": "@@ -103,7 +103,7 @@ public void testUnassignedShardAndEmptyNodesInRoutingTable() throws Exception {\n .nodes(DiscoveryNodes.EMPTY_NODES)\n .build(), false\n );\n- RoutingAllocation routingAllocation = new RoutingAllocation(allocationDeciders, routingNodes, current, ClusterInfo.EMPTY, System.nanoTime());\n+ RoutingAllocation routingAllocation = new RoutingAllocation(allocationDeciders, routingNodes, current, ClusterInfo.EMPTY, System.nanoTime(), false);\n allocator.allocateUnassigned(routingAllocation);\n }\n ", "filename": "core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java", "status": "modified" } ] }
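The tests above pin down the retry-budget behaviour: once a shard has failed allocation `index.allocation.max_retries` times (the `MaxRetryAllocationDecider` setting), it stays `UNASSIGNED` with its failure count preserved, and only a reroute issued with the retry flag (what `ClusterRerouteTests` drives via `req.setRetryFailed(true)`) or an index settings change moves it back to `INITIALIZING`. As a hedged usage sketch only, not code from this PR, and assuming a connected 5.x `Client` named `client` plus a reroute request builder that exposes the same `setRetryFailed` flag as the request object used in the test, the client-side flow might look roughly like this:

```
// Sketch only: raise the per-index retry budget, then ask the master to retry
// shards whose allocation attempts were exhausted. "idx" mirrors the test index name.
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;

public class RetryFailedAllocationSketch {

    static void retryFailedAllocations(Client client) {
        // same setting key the MaxRetryAllocationDeciderTests override
        client.admin().indices().prepareUpdateSettings("idx")
                .setSettings(Settings.builder().put("index.allocation.max_retries", 10).build())
                .get();

        // client-side counterpart of req.setRetryFailed(true) in ClusterRerouteTests:
        // retry shards that already hit the allocation failure limit
        client.admin().cluster().prepareReroute()
                .setRetryFailed(true)
                .get();
    }
}
```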
{ "body": "**Elasticsearch version**: 5.x/ current master\n\n**JVM version**: Oracle Corporation 1.8.0_60 [Java HotSpot(TM) 64-Bit Server VM 25.60-b23]\n\n**OS version**: Fedora release 23 (Twenty Three)\n\n**Description of the problem including expected versus actual behavior**:\n\nRunning the rescore query as documented against a current master built fails to parse the query json. See #18424 for original discussion.\n\n**Steps to reproduce**:\n1. Clone repository, build the source and start elasticsearch\n2. Submit the following query:\n \n `GET /_search{`\n \n ```\n \"query\" : {\n \"match\" : {\n \"field1\" : {\n \"operator\" : \"or\",\n \"query\" : \"the quick brown\",\n \"type\" : \"boolean\"\n }\n }\n },\n \"rescore\" : {\n \"window_size\" : 50,\n \"query\" : {\n \"rescore_query\" : {\n \"match\" : {\n \"field1\" : {\n \"query\" : \"the quick brown\",\n \"type\" : \"phrase\",\n \"slop\" : 2\n }\n }\n },\n \"query_weight\" : 0.7,\n \"rescore_query_weight\" : 1.2\n }}}\n ```\n\nWhich essentially is a plain copy of the example here: https://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-rescore.html\n\nError message I get:\n\n```\n{\n \"error\": {\n \"root_cause\": [\n {\n \"type\": \"parsing_exception\",\n \"reason\": \"Unknown key for a START_OBJECT in [rescore].\",\n \"line\": 11,\n \"col\": 16\n }\n ],\n \"type\": \"parsing_exception\",\n \"reason\": \"Unknown key for a START_OBJECT in [rescore].\",\n \"line\": 11,\n \"col\": 16\n },\n \"status\": 400\n{\n \"error\": {\n \"root_cause\": [\n {\n \"type\": \"parsing_exception\",\n \"reason\": \"Unknown key for a START_OBJECT in [rescore].\",\n \"line\": 11,\n \"col\": 16\n }\n ],\n \"type\": \"parsing_exception\",\n \"reason\": \"Unknown key for a START_OBJECT in [rescore].\",\n \"line\": 11,\n \"col\": 16\n },\n \"status\": 400\n}\n```\n\nFound thanks to the lovely doc snippet testing by @nik9000 - hunch about what's going wrong thanks to analysis by @cbuescher \n", "comments": [ { "body": "We are currently only parsing the array-syntax for the `rescore` part in SearchSourceBuilder (`\"rescore\" : [ {...}, {...} ]`) . We also need to support `\"rescore\" : {...}`\n", "created_at": "2016-05-18T12:24:03Z" } ], "number": 18439, "title": "Rescoring as documented in reference guide broken on master" }
{ "body": "We are currently only parsing the array-syntax for the rescore part in SearchSourceBuilder (`\"rescore\" : [ {...}, {...} ]`) . \nWe also used to support `\"rescore\" : {...}`, so adding this back to the parsing in SearchSourceBuilder.\n\nCloses #18439\n", "number": 18440, "review_comments": [], "title": "Fix parsing single `rescore` element in SearchSourceBuilder" }
{ "commits": [ { "message": "Fix parsing single `rescore` element in SearchSourceBuilder\n\nWe are currently only parsing the array-syntax for the rescore part\nin SearchSourceBuilder (\"rescore\" : [ {...}, {...} ]) . We also need\nto support \"rescore\" : {...}\n\nCloses #18439" } ], "files": [ { "diff": "@@ -1033,6 +1033,9 @@ public void parseXContent(QueryParseContext context, AggregatorParsers aggParser\n suggestBuilder = SuggestBuilder.fromXContent(context, suggesters);\n } else if (context.getParseFieldMatcher().match(currentFieldName, SORT_FIELD)) {\n sorts = new ArrayList<>(SortBuilder.fromXContent(context));\n+ } else if (context.getParseFieldMatcher().match(currentFieldName, RESCORE_FIELD)) {\n+ rescoreBuilders = new ArrayList<>();\n+ rescoreBuilders.add(RescoreBuilder.parseFromXContent(context));\n } else if (context.getParseFieldMatcher().match(currentFieldName, EXT_FIELD)) {\n XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);\n ext = xContentBuilder.bytes();", "filename": "core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java", "status": "modified" }, { "diff": "@@ -72,6 +72,7 @@\n import org.elasticsearch.search.fetch.source.FetchSourceContext;\n import org.elasticsearch.search.highlight.HighlightBuilderTests;\n import org.elasticsearch.search.rescore.QueryRescoreBuilderTests;\n+import org.elasticsearch.search.rescore.QueryRescorerBuilder;\n import org.elasticsearch.search.searchafter.SearchAfterBuilder;\n import org.elasticsearch.search.sort.FieldSortBuilder;\n import org.elasticsearch.search.sort.ScoreSortBuilder;\n@@ -561,6 +562,57 @@ public void testParseSort() throws IOException {\n }\n }\n \n+ /**\n+ * test that we can parse the `rescore` element either as single object or as array\n+ */\n+ public void testParseRescore() throws IOException {\n+ {\n+ String restContent = \"{\\n\" +\n+ \" \\\"query\\\" : {\\n\" +\n+ \" \\\"match\\\": { \\\"content\\\": { \\\"query\\\": \\\"foo bar\\\" }}\\n\" +\n+ \" },\\n\" +\n+ \" \\\"rescore\\\": {\" +\n+ \" \\\"window_size\\\": 50,\\n\" +\n+ \" \\\"query\\\": {\\n\" +\n+ \" \\\"rescore_query\\\" : {\\n\" +\n+ \" \\\"match\\\": { \\\"content\\\": { \\\"query\\\": \\\"baz\\\" } }\\n\" +\n+ \" }\\n\" +\n+ \" }\\n\" +\n+ \" }\\n\" +\n+ \"}\\n\";\n+ try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {\n+ SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(createParseContext(parser),\n+ aggParsers, suggesters);\n+ assertEquals(1, searchSourceBuilder.rescores().size());\n+ assertEquals(new QueryRescorerBuilder(QueryBuilders.matchQuery(\"content\", \"baz\")).windowSize(50),\n+ searchSourceBuilder.rescores().get(0));\n+ }\n+ }\n+\n+ {\n+ String restContent = \"{\\n\" +\n+ \" \\\"query\\\" : {\\n\" +\n+ \" \\\"match\\\": { \\\"content\\\": { \\\"query\\\": \\\"foo bar\\\" }}\\n\" +\n+ \" },\\n\" +\n+ \" \\\"rescore\\\": [ {\" +\n+ \" \\\"window_size\\\": 50,\\n\" +\n+ \" \\\"query\\\": {\\n\" +\n+ \" \\\"rescore_query\\\" : {\\n\" +\n+ \" \\\"match\\\": { \\\"content\\\": { \\\"query\\\": \\\"baz\\\" } }\\n\" +\n+ \" }\\n\" +\n+ \" }\\n\" +\n+ \" } ]\\n\" +\n+ \"}\\n\";\n+ try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {\n+ SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(createParseContext(parser),\n+ aggParsers, suggesters);\n+ assertEquals(1, searchSourceBuilder.rescores().size());\n+ assertEquals(new 
QueryRescorerBuilder(QueryBuilders.matchQuery(\"content\", \"baz\")).windowSize(50),\n+ searchSourceBuilder.rescores().get(0));\n+ }\n+ }\n+ }\n+\n public void testEmptyPostFilter() throws IOException {\n SearchSourceBuilder builder = new SearchSourceBuilder();\n builder.postFilter(new EmptyQueryBuilder());", "filename": "core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java", "status": "modified" } ] }
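The new SearchSourceBuilderTests above exercise both request shapes, the single-object `"rescore" : {...}` form from the reference guide and the already-working array form `"rescore" : [ {...} ]`, and both now parse to a single `QueryRescorerBuilder`. As a small illustration only, assuming the 5.x Java API (`SearchSourceBuilder.addRescorer` together with the builders named in the test; the field names and query text are placeholders), the same search source can be built programmatically like this:

```
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.rescore.QueryRescorerBuilder;

public class RescoreSourceSketch {

    public static void main(String[] args) {
        // programmatic equivalent of the JSON in the bug report: a match query
        // plus one query rescorer with a window of 50
        SearchSourceBuilder source = new SearchSourceBuilder()
                .query(QueryBuilders.matchQuery("content", "foo bar"))
                .addRescorer(new QueryRescorerBuilder(QueryBuilders.matchQuery("content", "baz"))
                        .windowSize(50));

        // printing the builder renders its JSON form, which shows how the
        // rescore section is serialized
        System.out.println(source);
    }
}
```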
{ "body": "This pr https://github.com/elastic/elasticsearch/pull/15320\ncannot be easily ported to master because we need to translate mvn/ant to gradle which might take me a little so I open an issue here.\n", "comments": [ { "body": "I don't think the change will be that difficult, but I would also wait until I've finished refactoring ES into a fixture. Part of that is to use ProcessBuilder instead of ant exec, at which point we will no longer use the pidfiles for correct test execution, and the command line qa test you have here can set the options and check the pid file itself.\n", "created_at": "2016-01-20T16:40:52Z" }, { "body": "> I would also wait until I've finished refactoring ES into a fixture\n\nok\n", "created_at": "2016-01-20T16:41:55Z" }, { "body": "@rjernst does this still need doing?\n", "created_at": "2016-05-03T14:05:40Z" }, { "body": "> does this still need doing?\n\nThe fix has been done in master in e32da555aa56b20f914610282392f51c2d12b8ac by @ywelsch after the command-line parsing refactoring in #17088 but the tests have not been forward-ported.\n", "created_at": "2016-05-03T14:16:30Z" }, { "body": "@brwe would you be able to port the tests to master please?\n", "created_at": "2016-05-07T15:33:51Z" }, { "body": "ok\n", "created_at": "2016-05-09T08:58:09Z" }, { "body": "We discussed it in #18437 and came to the conclusion that might make more sense to invest in bats tests for windows (https://github.com/elastic/elasticsearch/issues/18475) instead of having this one test in gradle or the likes.\n", "created_at": "2016-05-19T18:57:35Z" }, { "body": "superseded by #18475\n", "created_at": "2016-05-25T12:22:53Z" } ], "number": 16129, "title": "port fix for windows command line options to master" }
{ "body": "This commit adds a test for command line options -p, --version and --help\n\ncloses #16129\n", "number": 18437, "review_comments": [ { "body": "This seems like a strange thing to do in a clean task.\n", "created_at": "2016-05-18T11:38:16Z" }, { "body": "You've named the variable for the clean task \"setup\"?\n", "created_at": "2016-05-18T11:38:28Z" }, { "body": "I might just name the variable for the clean task \"clean\" and use \"setup\" for these?\n", "created_at": "2016-05-18T11:39:39Z" }, { "body": "I copied that part from here: https://github.com/elastic/elasticsearch/blob/master/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy#L149 I figured the directory has to be created somewhere and did not want to add an additional task for it. But I can totally add that somewhere else. Have a suggestion where a good place would be? \n", "created_at": "2016-05-18T12:42:19Z" }, { "body": "I will rename it to \"task\". Better?\n", "created_at": "2016-05-18T12:43:10Z" }, { "body": "ok\n", "created_at": "2016-05-18T12:43:29Z" } ], "title": "[TEST] test command line options" }
{ "commits": [ { "message": "[TEST] test command line options\n\nThis commit adds a test for command line options -p, --version and --help\n\ncloses #16129" }, { "message": "rename variables" } ], "files": [ { "diff": "@@ -0,0 +1,35 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.elasticsearch.gradle.test\n+\n+import org.gradle.api.Plugin\n+import org.gradle.api.Project\n+import org.gradle.api.plugins.JavaBasePlugin\n+\n+/** A plugin to test command line options like --version etc. */\n+public class CommandLineTestPlugin implements Plugin<Project> {\n+\n+ @Override\n+ public void apply(Project project) {\n+ // is where \"check\" comes from\n+ project.pluginManager.apply(JavaBasePlugin)\n+ CommandLineTestTask integTest = project.tasks.create('commandLineTest', CommandLineTestTask.class)\n+ project.check.dependsOn(integTest)\n+ }\n+}", "filename": "buildSrc/src/main/groovy/org/elasticsearch/gradle/test/CommandLineTestPlugin.groovy", "status": "added" }, { "diff": "@@ -0,0 +1,229 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.elasticsearch.gradle.test\n+\n+import org.apache.tools.ant.taskdefs.condition.Os\n+import org.elasticsearch.gradle.LoggedExec\n+import org.elasticsearch.gradle.VersionProperties\n+import org.gradle.api.AntBuilder\n+import org.gradle.api.DefaultTask\n+import org.gradle.api.Project\n+import org.gradle.api.Task\n+import org.gradle.api.artifacts.Configuration\n+import org.gradle.api.plugins.JavaBasePlugin\n+import org.gradle.api.tasks.Delete\n+import org.gradle.api.tasks.Input\n+import org.gradle.util.ConfigureUtil\n+\n+/**\n+ * Starts nodes with different command line options (-p pid, --help, ...) 
and checks that all is working OK.\n+ */\n+public class CommandLineTestTask extends DefaultTask {\n+\n+ ClusterConfiguration clusterConfig = new ClusterConfiguration()\n+\n+\n+ public CommandLineTestTask() {\n+\n+ description = 'Checks that command line options like -V, -p pid and so on work.'\n+ group = JavaBasePlugin.VERIFICATION_GROUP\n+\n+ // this is our current version distribution configuration we use for all kinds of REST tests etc.\n+ project.configurations {\n+ elasticsearchDistro\n+ }\n+ // directory needed for installing and testing\n+ File sharedDir = new File(project.buildDir, \"cluster/shared\")\n+ clusterConfig.daemonize = false\n+ clusterConfig.distribution = 'zip'\n+ NodeInfo node = new NodeInfo(clusterConfig, 0, project, this, VersionProperties.elasticsearch, sharedDir)\n+ Configuration configuration = project.configurations.elasticsearchDistro\n+\n+ // cleanup task\n+ Task clean = project.tasks.create(name: ClusterFormationTasks.taskName(this, node, 'clean'), type: Delete) {\n+ delete node.homeDir\n+ delete node.cwd\n+ doLast {\n+ node.cwd.mkdirs()\n+ }\n+ }\n+\n+ // prepares extraction and configuration setup so we can start the node\n+ Task setup = ClusterFormationTasks.configureExtractTask(ClusterFormationTasks.taskName(this, node, 'extract'), project, clean,\n+ node, configuration)\n+ ClusterFormationTasks.configureDistributionDependency(project, clusterConfig.distribution, project.configurations.elasticsearchDistro,\n+ VersionProperties.elasticsearch)\n+ setup = ClusterFormationTasks.configureWriteConfigTask(ClusterFormationTasks.taskName(this, node, 'configure'), project, setup,\n+ node)\n+\n+ // check that --help works\n+ setup = configureInfoTask(ClusterFormationTasks.taskName(this, node, 'test-help-param'), project, setup, node, \"-E \" +\n+ \"<KeyValuePair> Configure an Elasticsearch setting\", \"--help\");\n+\n+ // check that --version works\n+ boolean isSnapshot = VersionProperties.elasticsearch.endsWith(\"-SNAPSHOT\");\n+ String version = VersionProperties.elasticsearch;\n+ if (isSnapshot) {\n+ version = version.substring(0, version.length() - 9)\n+ }\n+ setup = configureInfoTask(ClusterFormationTasks.taskName(this, node, 'test-version-param'), project, setup, node,\n+ \"Version: \" + version, \"--version\");\n+\n+ // check that -p works\n+ File pidFile = new File(sharedDir, \"pidForTest-p\")\n+ setup = configurePidParamTask(ClusterFormationTasks.taskName(this, node, 'test-pid-param-start'), project, setup, node, pidFile\n+ .absolutePath)\n+ setup = configurePidCheckTask(ClusterFormationTasks.taskName(this, node, 'test-pid-param-check'), project, setup, pidFile)\n+ setup = configureStopTask(ClusterFormationTasks.taskName(this, node, 'test-pid-param-stop'), project, setup, pidFile)\n+ this.dependsOn(setup)\n+\n+ }\n+\n+ /** Adds a task to start an elasticsearch with a command line option that will not actually start the node (--help or --version) */\n+ static Task configureInfoTask(String name, Project project, Task setup, NodeInfo node, String matchString, String\n+ commandLineParameter) {\n+\n+ // this closure is converted into ant nodes by groovy's AntBuilder\n+ Closure antRunner = { AntBuilder ant ->\n+ ant.exec(executable: node.executable, spawn: false, dir: node.cwd, taskname: 'elasticsearch') {\n+ node.env.each { key, value -> env(key: key, value: value) }\n+ node.args.each { arg(value: it) }\n+ arg(value: commandLineParameter) // add the command line param we want to test\n+ }\n+ }\n+\n+ // this closure is the actual code to run elasticsearch and 
check the output\n+ Closure elasticsearchRunner = {\n+ // capture output so we can check it later\n+ ByteArrayOutputStream outputStream = new ByteArrayOutputStream();\n+ PrintStream captureStream = new PrintStream(outputStream, true, \"UTF-8\")\n+ ClusterFormationTasks.runAntCommand(project, antRunner, captureStream, captureStream)\n+ String output = new String(outputStream.toByteArray());\n+\n+ if (output.contains(matchString) == false || output.contains(\"ERROR\")) {\n+ logger.error(\"Start elasticsearch with \" + commandLineParameter + \" failed.\")\n+ logger.error(\"Here is the message:\")\n+ logger.error(output)\n+ logger.error(\"in which we were looking for this string: \" + \"\\\"\" + matchString + \"\\\"\")\n+ logger.error(\"Started elasticsearch with this command: \" + node.executable + \" \" + node.args.join(' ') + \" \" +\n+ commandLineParameter)\n+ throw new Exception(\"Test command line options failed.\")\n+ }\n+ }\n+\n+ Task start = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)\n+ start.doLast(elasticsearchRunner)\n+ return start\n+ }\n+\n+ /** Adds a task to start an elasticsearch node with -p pid option */\n+ static Task configurePidParamTask(String name, Project project, Task setup, NodeInfo node, String pathToPID) {\n+\n+ // this closure is converted into ant nodes by groovy's AntBuilder\n+ Closure antRunner = { AntBuilder ant ->\n+ ant.exec(executable: node.executable, spawn: true, dir: node.cwd, taskname: 'elasticsearch') {\n+ node.env.each { key, value -> env(key: key, value: value) }\n+ node.args.each { arg(value: it) }\n+ arg(value: '-p') // add -p\n+ arg(value: pathToPID) // add the path to the pid\n+ }\n+ }\n+\n+ // start the node\n+ Closure elasticsearchRunner = {\n+ ClusterFormationTasks.runAntCommand(project, antRunner, System.out, System.err)\n+ }\n+\n+ Task start = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)\n+ start.doLast(elasticsearchRunner)\n+ return start\n+ }\n+\n+ /** Adds a task to check that the pid was actually written */\n+ static Task configurePidCheckTask(String name, Project project, Task depends, File pidFile) {\n+\n+ Task wait = project.tasks.create(name: name, dependsOn: depends)\n+ wait.doLast {\n+ ant.waitfor(maxwait: '30', maxwaitunit: 'second', checkevery: '5000', checkeveryunit: 'millisecond', timeoutproperty:\n+ \"failed${name}\") {\n+ resourceexists {\n+ file(file: pidFile.toString())\n+ }\n+ }\n+ if (pidFile.exists() == false) {\n+ throw new Exception(\"Pid file \" + pidFile + \" was not found! 
Beware, there might be a rogue elasticsearch instance \" +\n+ \"running and we cannot shut it down because we don't know which pid it has.\")\n+ }\n+ }\n+ return wait\n+ }\n+\n+ /** Adds a task to kill an elasticsearch node with the given pidfile */\n+ // TODO: Maybe this code can be shared with ClusterFormationTasks.configureStopTask ?\n+ static Task configureStopTask(String name, Project project, Task depends, File pidFile) {\n+ return project.tasks.create(name: name, type: LoggedExec, dependsOn: depends) {\n+ onlyIf { pidFile.exists() }\n+ // the pid file won't actually be read until execution time, since the read is wrapped within an inner closure of the GString\n+ ext.pid = \"${-> pidFile.getText('UTF-8').trim()}\"\n+ doFirst {\n+ logger.info(\"Shutting down external node with pid ${pid}\")\n+ }\n+ if (Os.isFamily(Os.FAMILY_WINDOWS)) {\n+ executable 'Taskkill'\n+ args '/PID', pid, '/F'\n+ } else {\n+ executable 'kill'\n+ args '-9', pid\n+ }\n+ doLast {\n+ project.delete(pidFile)\n+ }\n+ }\n+ }\n+\n+ @Input\n+ public void cluster(Closure closure) {\n+ ConfigureUtil.configure(closure, clusterConfig)\n+ }\n+\n+ public ClusterConfiguration getCluster() {\n+ return clusterConfig\n+ }\n+\n+ @Override\n+ public Task dependsOn(Object... dependencies) {\n+ super.dependsOn(dependencies)\n+ for (Object dependency : dependencies) {\n+ if (dependency instanceof Fixture) {\n+ finalizedBy(((Fixture) dependency).stopTask)\n+ }\n+ }\n+ return this\n+ }\n+\n+ @Override\n+ public void setDependsOn(Iterable<?> dependencies) {\n+ super.setDependsOn(dependencies)\n+ for (Object dependency : dependencies) {\n+ if (dependency instanceof Fixture) {\n+ finalizedBy(((Fixture) dependency).stopTask)\n+ }\n+ }\n+ }\n+}", "filename": "buildSrc/src/main/groovy/org/elasticsearch/gradle/test/CommandLineTestTask.groovy", "status": "added" }, { "diff": "@@ -0,0 +1 @@\n+implementation-class=org.elasticsearch.gradle.test.CommandLineTestPlugin", "filename": "buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.cmd-test.properties", "status": "added" }, { "diff": "@@ -0,0 +1,5 @@\n+\n+apply plugin: 'elasticsearch.cmd-test'\n+\n+commandLineTest {\n+}", "filename": "qa/smoke-test-command-line-params/build.gradle", "status": "added" }, { "diff": "@@ -44,6 +44,7 @@ List projects = [\n 'qa:backwards-5.0',\n 'qa:evil-tests',\n 'qa:smoke-test-client',\n+ 'qa:smoke-test-command-line-params',\n 'qa:smoke-test-multinode',\n 'qa:smoke-test-reindex-with-groovy',\n 'qa:smoke-test-plugins',", "filename": "settings.gradle", "status": "modified" } ] }
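The Gradle tasks above shell out through Ant; one of the comments on #16129 suggests doing the same checks with `ProcessBuilder` directly. A rough standalone sketch of that idea, assuming the distribution has already been extracted (the launcher path and timeout are placeholders, not values taken from the build):

```java
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.stream.Collectors;

public class CommandLineSmokeCheck {

    public static void main(String[] args) throws IOException, InterruptedException {
        // assumed location of the extracted distribution's launcher script; adjust for the local checkout
        Path launcher = Paths.get("build", "cluster", "shared", "elasticsearch", "bin", "elasticsearch");

        // --version should print a "Version: ..." line and exit without starting a node
        Process version = new ProcessBuilder(launcher.toString(), "--version")
                .redirectErrorStream(true)
                .start();
        String output;
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(version.getInputStream(), StandardCharsets.UTF_8))) {
            output = reader.lines().collect(Collectors.joining("\n"));
        }
        version.waitFor();
        if (output.contains("Version:") == false) {
            throw new AssertionError("--version output did not contain 'Version:': " + output);
        }

        // -p <file> should write the node's pid to that file shortly after startup
        Path pidFile = Paths.get("build", "cluster", "shared", "pidForTest-p");
        Files.deleteIfExists(pidFile);
        Process node = new ProcessBuilder(launcher.toString(), "-p", pidFile.toString()).start();
        try {
            long deadline = System.currentTimeMillis() + 30_000;
            while (Files.exists(pidFile) == false && System.currentTimeMillis() < deadline) {
                Thread.sleep(500);
            }
            if (Files.exists(pidFile) == false) {
                throw new AssertionError("pid file was never written: " + pidFile);
            }
        } finally {
            node.destroyForcibly(); // stands in for the explicit kill/Taskkill step in the Gradle task
        }
    }
}
```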
{ "body": "See for example: https://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+master+multijob-os-compatibility/os=debian/405/consoleText\n\nI can reproduce if I run this in a loop like so:\n\n```\n gradle :core:integTest -Dtests.seed=FCF3BEA90D893CAF -Dtests.class=org.elasticsearch.indices.memory.breaker.CircuitBreakerServiceIT -Dtests.method=\"testParentChecking\" -Des.logger.level=WARN -Dtests.security.manager=true -Dtests.jvm.argline=\"-XX:-UseParNewGC -XX:-UseConcMarkSweepGC -XX:+UseG1GC\" -Dtests.locale=es-UY -Dtests.timezone=America/Menominee -Dtests.iters=100 -Dtests.failfast=true\n```\n", "comments": [], "number": 18325, "title": "CircuitBreakerServiceIT.testParentChecking fails frequently" }
{ "body": "With this commit we clear all caches after testing the parent circuit breaker.\nThis is necessary as caches hold on to circuit breakers internally. Additionally,\ndue to usage of `CircuitBreaker#addWithoutBreaking()` in caches, it's even possible\nto go above the limit. As a consequence, all subsequent requests fall victim to\nthe limit.\n\nHence, right after the parent circuit breaker tripped, we clear all caches to\nreduce these circuit breakers to 0 again. We also exclude the clear caches\ntransport request from limit check in order to ensure it will succeed. As this is\ntypically a very small and low-volume request, it is deemed ok to exclude it.\n\nCloses #18325\n", "number": 18432, "review_comments": [], "title": "Clear all caches after testing parent breaker" }
{ "commits": [ { "message": "Clear all caches after testing parent breaker\n\nWith this commit we clear all caches after testing the parent circuit breaker.\nThis is necessary as caches hold on to circuit breakers internally. Additionally,\ndue to usage of CircuitBreaker#addWithoutBreaking() in caches, it's even possible\nto go above the limit. As a consequence, all subsequent requests fall victim to\nthe limit.\n\nHence, right after the parent circuit breaker tripped, we clear all caches to\nreduce these circuit breakers to 0 again. We also exclude the clear caches\ntransport request from limit check in order to ensure it will succeed. As this is\ntypically a very small and low-volume request, it is deemed ok to exclude it.\n\nCloses #18325" } ], "files": [ { "diff": "@@ -54,7 +54,7 @@ public TransportClearIndicesCacheAction(Settings settings, ThreadPool threadPool\n TransportService transportService, IndicesService indicesService, ActionFilters actionFilters,\n IndexNameExpressionResolver indexNameExpressionResolver) {\n super(settings, ClearIndicesCacheAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,\n- ClearIndicesCacheRequest::new, ThreadPool.Names.MANAGEMENT);\n+ ClearIndicesCacheRequest::new, ThreadPool.Names.MANAGEMENT, false);\n this.indicesService = indicesService;\n }\n ", "filename": "core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java", "status": "modified" }, { "diff": "@@ -84,6 +84,20 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe\n \n final String transportNodeBroadcastAction;\n \n+ public TransportBroadcastByNodeAction(\n+ Settings settings,\n+ String actionName,\n+ ThreadPool threadPool,\n+ ClusterService clusterService,\n+ TransportService transportService,\n+ ActionFilters actionFilters,\n+ IndexNameExpressionResolver indexNameExpressionResolver,\n+ Supplier<Request> request,\n+ String executor) {\n+ this(settings, actionName, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, request,\n+ executor, true);\n+ }\n+\n public TransportBroadcastByNodeAction(\n Settings settings,\n String actionName,\n@@ -93,15 +107,17 @@ public TransportBroadcastByNodeAction(\n ActionFilters actionFilters,\n IndexNameExpressionResolver indexNameExpressionResolver,\n Supplier<Request> request,\n- String executor) {\n+ String executor,\n+ boolean canTripCircuitBreaker) {\n super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request);\n \n this.clusterService = clusterService;\n this.transportService = transportService;\n \n transportNodeBroadcastAction = actionName + \"[n]\";\n \n- transportService.registerRequestHandler(transportNodeBroadcastAction, NodeRequest::new, executor, new BroadcastByNodeTransportRequestHandler());\n+ transportService.registerRequestHandler(transportNodeBroadcastAction, NodeRequest::new, executor, false, canTripCircuitBreaker,\n+ new BroadcastByNodeTransportRequestHandler());\n }\n \n private Response newResponse(", "filename": "core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java", "status": "modified" }, { "diff": "@@ -75,6 +75,9 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase {\n /** Reset all breaker settings back to their defaults */\n private void reset() {\n logger.info(\"--> resetting breaker settings\");\n+ // clear all caches, we could be very close (or even 
above) the limit and then we will not be able to reset the breaker settings\n+ client().admin().indices().prepareClearCache().setFieldDataCache(true).setQueryCache(true).setRequestCache(true).get();\n+\n Settings resetSettings = Settings.builder()\n .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(),\n HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getDefaultRaw(null))\n@@ -214,7 +217,6 @@ public void testRamAccountingTermsEnum() throws Exception {\n * Test that a breaker correctly redistributes to a different breaker, in\n * this case, the fielddata breaker borrows space from the request breaker\n */\n- @AwaitsFix(bugUrl = \"https://github.com/elastic/elasticsearch/issues/18325\")\n public void testParentChecking() throws Exception {\n if (noopBreakerUsed()) {\n logger.info(\"--> noop breakers used, skipping test\");\n@@ -274,9 +276,6 @@ public void testParentChecking() throws Exception {\n cause.toString(), startsWith(\"CircuitBreakingException[[parent] Data too large\"));\n assertThat(\"Exception: [\" + cause.toString() + \"] should contain a CircuitBreakingException\",\n cause.toString(), endsWith(errMsg));\n- } finally {\n- // reset before teardown as it requires properly set up breakers\n- reset();\n }\n }\n ", "filename": "core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java", "status": "modified" } ] }
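The failure mode being worked around here relies on `addWithoutBreaking()` being allowed to push a breaker's accounting past its limit, after which every checked request trips until the caches are cleared. A simplified toy model of that interaction (the method names mirror the real `CircuitBreaker` interface, but the behaviour is deliberately stripped down and is not the Elasticsearch implementation):

```java
import java.util.concurrent.atomic.AtomicLong;

/** Toy breaker: checked adds trip once unchecked adds have pushed usage over the limit. */
public class ToyCircuitBreaker {

    private final long limitBytes;
    private final AtomicLong used = new AtomicLong();

    public ToyCircuitBreaker(long limitBytes) {
        this.limitBytes = limitBytes;
    }

    /** Caches use this: it never fails, so usage can end up above the limit. */
    public long addWithoutBreaking(long bytes) {
        return used.addAndGet(bytes);
    }

    /** Requests use this: it fails whenever the (possibly already exceeded) limit would be crossed. */
    public long addEstimateBytesAndMaybeBreak(long bytes) {
        long newUsed = used.addAndGet(bytes);
        if (newUsed > limitBytes) {
            used.addAndGet(-bytes); // roll back the reservation before failing
            throw new IllegalStateException("Data too large: " + newUsed + " > " + limitBytes);
        }
        return newUsed;
    }

    public static void main(String[] args) {
        ToyCircuitBreaker breaker = new ToyCircuitBreaker(100);
        breaker.addWithoutBreaking(150);                   // cache accounting goes straight past the limit
        try {
            breaker.addEstimateBytesAndMaybeBreak(1);      // now even a 1-byte request trips
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage());
        }
        breaker.addWithoutBreaking(-150);                  // clearing the caches releases the bytes again
        System.out.println(breaker.addEstimateBytesAndMaybeBreak(1)); // succeeds: prints 1
    }
}
```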
{ "body": "**Elasticsearch version**: 2.3.2\n\n**JVM version**: 1.8.0_92\n\n**OS version**: Windows 7\n\n**Description of the problem including expected versus actual behavior**:\n\nIt appears that the DateHistogram aggregation has some troubles when time zone is set, and the histogram with hourly interval passes a DST transition (eg CEST -> CET). \n\n**Steps to reproduce**:\n\nTests reproducing the error in this diff against v2.3.2: https://github.com/nilsga/elasticsearch/commit/155e3081ca04cd63e6b973dbe4cebd13bb7972b8\n\n**Provide logs (if relevant)**:\n\nThe error is \n\n```\nFailed to execute phase [merge], [reduce] \n\n at __randomizedtesting.SeedInfo.seed([71B41DDE1915B1DC:6476BA807BC1E239]:0)\n at org.elasticsearch.action.search.SearchQueryAndFetchAsyncAction$1.onFailure(SearchQueryAndFetchAsyncAction.java:76)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:39)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n at java.lang.Thread.run(Thread.java:745)\nCaused by: java.lang.AssertionError\n at org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.addEmptyBuckets(InternalHistogram.java:442)\n at org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.doReduce(InternalHistogram.java:466)\n at org.elasticsearch.search.aggregations.InternalAggregation.reduce(InternalAggregation.java:153)\n at org.elasticsearch.search.aggregations.InternalAggregations.reduce(InternalAggregations.java:170)\n at org.elasticsearch.search.controller.SearchPhaseController.merge(SearchPhaseController.java:411)\n at org.elasticsearch.action.search.SearchQueryAndFetchAsyncAction$1.doRun(SearchQueryAndFetchAsyncAction.java:64)\n at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)\n ... 3 more\n```\n\nI think it is related to the implementation of `TimeZoneRounding.nextRoundingInterval`. I don't quite get why time is converted from UTC to local time, and then back to UTC. I would expect a certain point of time to be constant, regardless of timezone. Formatting of time, however, I can understand would require time zone specific behaviour.\n", "comments": [ { "body": "Relates #18310\n", "created_at": "2016-05-13T12:37:21Z" }, { "body": "I have not fully understood the logic yet, but It seems odd that the rounding logic is implemented by actually shifting the time according to local \"calendar\" time. All duration fields in `DateTimeUnit` seems to be UTC based. If one instead looked up the `DateTimeField` with the correct timezone of the chronology, it seems to me like the rounding should be provided \"out of the box\" by Joda.\n", "created_at": "2016-05-14T16:20:28Z" }, { "body": "@nilsga great catch, I'm looking into this and think I found the glitch in the rounding logic of `TimeZoneRounding.nextRoundingInterval`. The conversion from UTC to local time is necessary there for day or month intervals, otherwise we would end up cases where `TimeZoneRounding.nextRoundingInterval` is not on the correct start of day/month when crossing dst transitions. But I think for smaller durations (hours in your case, minutes and seconds also) we should not do this. 
I will need to look a bit closer and add some test to be sure though.\n", "created_at": "2016-05-17T10:15:38Z" }, { "body": "This is how this behaviour presents itself outside of unit-tests: create an empty index, add any document with a date field, then do a histogram query with extended bounds covering the last DST end (e.g. Oslo/Europe 2015-10-25, 2015-10-25T02:00:00.000+02:00):\n\n```\nDELETE /_all\n\nPUT /test/type/1\n{\n \"dateField\" : \"2000-01-01\"\n}\n\nGET /test/type/_search\n{\n \"query\": {\n \"match_none\": {}\n },\n \"aggs\": {\n \"events_by_date\": {\n \"date_histogram\": {\n \"field\": \"dateField\",\n \"interval\": \"1h\",\n \"time_zone\": \"Europe/Oslo\",\n \"min_doc_count\": 0,\n \"extended_bounds\": {\n \"min\": \"2015-10-25T02:00:00.000+02:00\",\n \"max\": \"2015-10-25T04:00:00.000+01:00\"\n }\n }\n }\n }\n}\n\n\"aggregations\": {\n \"events_by_date\": {\n \"buckets\": [\n {\n \"key_as_string\": \"2015-10-25T02:00:00.000+02:00\",\n \"key\": 1445731200000,\n \"doc_count\": 0\n },\n {\n \"key_as_string\": \"2015-10-25T03:00:00.000+01:00\",\n \"key\": 1445738400000,\n \"doc_count\": 0\n },\n {\n \"key_as_string\": \"2015-10-25T04:00:00.000+01:00\",\n \"key\": 1445742000000,\n \"doc_count\": 0\n }\n ]\n }\n }\n```\n\nThe problem is that when looking at the bucket keys (utc), the first bucket covers two hours (1445738400000 - 1445731200000 = 7200000) whereas the other buckets are one hour(3600000 ms) wide.\n\nIn the other direction (start of DST) we dont have the same problem. Since local time is advanced by one hour, we still have 1h buckets, but you can see the jump in \"key_as_string\":\n\n```\nGET /test/type/_search\n{\n \"query\": {\n \"match_none\": {}\n },\n \"aggs\": {\n \"events_by_date\": {\n \"date_histogram\": {\n \"field\": \"dateField\",\n \"interval\": \"1h\",\n \"time_zone\": \"Europe/Oslo\",\n \"min_doc_count\": 0,\n \"extended_bounds\": {\n \"min\": \"2016-03-27T01:00:00.000+01:00\",\n \"max\": \"2016-03-27T04:00:00.000+02:00\"\n }\n }\n }\n }\n}\n\n\"aggregations\": {\n \"events_by_date\": {\n \"buckets\": [\n {\n \"key_as_string\": \"2016-03-27T01:00:00.000+01:00\",\n \"key\": 1459036800000,\n \"doc_count\": 0\n },\n {\n \"key_as_string\": \"2016-03-27T03:00:00.000+02:00\",\n \"key\": 1459040400000,\n \"doc_count\": 0\n },\n {\n \"key_as_string\": \"2016-03-27T04:00:00.000+02:00\",\n \"key\": 1459044000000,\n \"doc_count\": 0\n }\n ]\n }\n } \n```\n\nI think we should have equally spaced date histogram buckets for hour and sub-hour time units and will open a PR for that.\n", "created_at": "2016-05-17T14:04:28Z" }, { "body": "Thanks for looking into it. I understand that you need to have correctly aligned buckets for all time units. 
But I don't understand why this could not be handled by the JodaTime library by using `ISOChronology.getInstance(timeZone)` instead of `ISOChronology.getInstanceUTC()`, which would yield time zone adjusted \"floor\" values for `DateTimeField.roundFloor`?\n\n```\nDateTime time = new DateTime(1445731200000L, DateTimeZone.UTC);\nDateTimeField fieldUtc = ISOChronology.getInstanceUTC().dayOfMonth(); // As in DateTimeUnit enum\nDateTimeField field = ISOChronology.getInstance(DateTimeZone.forID(\"Europe/Oslo\")).dayOfMonth(); // Time zone \"enabled\" version of the same field.\nDateTime rounded = new DateTime(field.roundFloor(time.getMillis()), DateTimeZone.forID(\"Europe/Oslo\"));\nDateTime roundedUtc = new DateTime(fieldUtc.roundFloor(time.getMillis()), DateTimeZone.UTC);\nSystem.out.println(rounded);\nSystem.out.println(roundedUtc);\nSystem.out.println(rounded.withZone(DateTimeZone.UTC));\n```\n\nOutput:\n\n```\n2015-10-25T00:00:00.000+02:00\n2015-10-25T00:00:00.000Z\n2015-10-24T22:00:00.000Z\n```\n", "created_at": "2016-05-17T20:41:01Z" }, { "body": "@nilsga thanks for pointing this out. As far as I see, this will do the same thing in ZonedChronology#roundFloor() that we are currently do in our TimeZoneRounding#roundKey(), namely converting UTC to local and back. It might be worth looking into whether using Joda directly for this would simplify our code base or not.\n", "created_at": "2016-05-18T14:24:10Z" } ], "number": 18326, "title": "DST issues with DateHistogram" }
{ "body": "Currently, rounding intervals obtained by `TimeUnitRounding#nextRoundingValue()` for hour, minute and second time units can include an extra hour when \noverlapping a DST transitions that adds an extra hour (eg CEST -> CET). This changes the rounding logic for time units smaller or equal to an hour to fix this.\n\nInternally when computing `nextRoundingValue()` we convert to time local to the time zone unsed for rounding before incrementing by one unit. This is necessary for day units and \nlarger to make sure we again arive at a timestamp that is rounded according to the timezone/unit. When crossing DST changes, this might for example add 23h, or 25h to arive \nat a start-of-day date, so in this case the differences between values obtained by repeatedly calling `#nextRoundingValue()` is not supposed to always we the same.\n\nFor time units equal or smaller to an hour we can ommit the conversion to local time and directly add the DurtionField, since we are sure to land again on a rounded date.\n\nCloses #18326 \n", "number": 18415, "review_comments": [ { "body": "maybe we could do something like `unit.field().add(0, 1) >= number_of_millis_in_a_day`?\n", "created_at": "2016-05-18T11:35:11Z" }, { "body": "I'm not sure we should to do any arithmetic only to check what kind of enum we have. There are only eight options, checking if any of the three are set seems clearer to me, wdyt?\n", "created_at": "2016-05-18T11:50:41Z" }, { "body": "I was only worried that we might add options in the future and forget to add them here.\n", "created_at": "2016-05-18T12:18:52Z" } ], "title": "Fix TimeUnitRounding for hour, minute and second units" }
{ "commits": [ { "message": "Fix TimeZoneRounding#nextRoundingValue for hour, minute and second units\n\nCurrently rounding intervals obtained by nextRoundingValue() for hour, minute and\nsecond units can include an extra hour when happening at DST transitions that add\nan extra hour (eg CEST -> CET). This changes the rounding logic for time units\nsmaller or equal to an hour to fix this.\n\nCloses #18326" } ], "files": [ { "diff": "@@ -53,6 +53,15 @@ public DateTimeField field() {\n return field;\n }\n \n+ /**\n+ * @param unit the {@link DateTimeUnit} to check\n+ * @return true if the unit is a day or longer\n+ */\n+ public static boolean isDayOrLonger(DateTimeUnit unit) {\n+ return (unit == DateTimeUnit.HOUR_OF_DAY || unit == DateTimeUnit.MINUTES_OF_HOUR\n+ || unit == DateTimeUnit.SECOND_OF_MINUTE) == false;\n+ }\n+\n public static DateTimeUnit resolve(byte id) {\n switch (id) {\n case 1: return WEEK_OF_WEEKYEAR;", "filename": "core/src/main/java/org/elasticsearch/common/rounding/DateTimeUnit.java", "status": "modified" }, { "diff": "@@ -46,8 +46,8 @@ public static Builder builder(TimeValue interval) {\n \n public static class Builder {\n \n- private DateTimeUnit unit;\n- private long interval = -1;\n+ private final DateTimeUnit unit;\n+ private final long interval;\n \n private DateTimeZone timeZone = DateTimeZone.UTC;\n \n@@ -142,10 +142,15 @@ public long valueForKey(long time) {\n \n @Override\n public long nextRoundingValue(long time) {\n- long timeLocal = time;\n- timeLocal = timeZone.convertUTCToLocal(time);\n- long nextInLocalTime = durationField.add(timeLocal, 1);\n- return timeZone.convertLocalToUTC(nextInLocalTime, false);\n+ if (DateTimeUnit.isDayOrLonger(unit)) {\n+ time = timeZone.convertUTCToLocal(time);\n+ }\n+ long next = durationField.add(time, 1);\n+ if (DateTimeUnit.isDayOrLonger(unit)) {\n+ return timeZone.convertLocalToUTC(next, false);\n+ } else {\n+ return next;\n+ }\n }\n \n @Override\n@@ -161,12 +166,12 @@ public void writeTo(StreamOutput out) throws IOException {\n out.writeByte(unit.id());\n out.writeString(timeZone.getID());\n }\n- \n+\n @Override\n public int hashCode() {\n return Objects.hash(unit, timeZone);\n }\n- \n+\n @Override\n public boolean equals(Object obj) {\n if (obj == null) {\n@@ -236,12 +241,12 @@ public void writeTo(StreamOutput out) throws IOException {\n out.writeVLong(interval);\n out.writeString(timeZone.getID());\n }\n- \n+\n @Override\n public int hashCode() {\n return Objects.hash(interval, timeZone);\n }\n- \n+\n @Override\n public boolean equals(Object obj) {\n if (obj == null) {", "filename": "core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java", "status": "modified" }, { "diff": "@@ -0,0 +1,75 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.elasticsearch.common.rounding;\n+\n+import org.elasticsearch.test.ESTestCase;\n+\n+import static org.elasticsearch.common.rounding.DateTimeUnit.WEEK_OF_WEEKYEAR;\n+import static org.elasticsearch.common.rounding.DateTimeUnit.YEAR_OF_CENTURY;\n+import static org.elasticsearch.common.rounding.DateTimeUnit.QUARTER;\n+import static org.elasticsearch.common.rounding.DateTimeUnit.MONTH_OF_YEAR;\n+import static org.elasticsearch.common.rounding.DateTimeUnit.DAY_OF_MONTH;\n+import static org.elasticsearch.common.rounding.DateTimeUnit.HOUR_OF_DAY;\n+import static org.elasticsearch.common.rounding.DateTimeUnit.MINUTES_OF_HOUR;\n+import static org.elasticsearch.common.rounding.DateTimeUnit.SECOND_OF_MINUTE;\n+\n+public class DateTimeUnitTests extends ESTestCase {\n+\n+ /**\n+ * test that we don't accidentally change enum ids\n+ */\n+ public void testEnumIds() {\n+ assertEquals(1, WEEK_OF_WEEKYEAR.id());\n+ assertEquals(WEEK_OF_WEEKYEAR, DateTimeUnit.resolve((byte) 1));\n+\n+ assertEquals(2, YEAR_OF_CENTURY.id());\n+ assertEquals(YEAR_OF_CENTURY, DateTimeUnit.resolve((byte) 2));\n+\n+ assertEquals(3, QUARTER.id());\n+ assertEquals(QUARTER, DateTimeUnit.resolve((byte) 3));\n+\n+ assertEquals(4, MONTH_OF_YEAR.id());\n+ assertEquals(MONTH_OF_YEAR, DateTimeUnit.resolve((byte) 4));\n+\n+ assertEquals(5, DAY_OF_MONTH.id());\n+ assertEquals(DAY_OF_MONTH, DateTimeUnit.resolve((byte) 5));\n+\n+ assertEquals(6, HOUR_OF_DAY.id());\n+ assertEquals(HOUR_OF_DAY, DateTimeUnit.resolve((byte) 6));\n+\n+ assertEquals(7, MINUTES_OF_HOUR.id());\n+ assertEquals(MINUTES_OF_HOUR, DateTimeUnit.resolve((byte) 7));\n+\n+ assertEquals(8, SECOND_OF_MINUTE.id());\n+ assertEquals(SECOND_OF_MINUTE, DateTimeUnit.resolve((byte) 8));\n+ }\n+\n+ public void testIsDayOrLonger() {\n+ for (DateTimeUnit unit : DateTimeUnit.values()) {\n+ if (DateTimeUnit.isDayOrLonger(unit)) {\n+ assertTrue(unit == DAY_OF_MONTH ||\n+ unit == MONTH_OF_YEAR ||\n+ unit == QUARTER ||\n+ unit == YEAR_OF_CENTURY ||\n+ unit == WEEK_OF_WEEKYEAR);\n+ }\n+ }\n+ }\n+\n+}", "filename": "core/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java", "status": "added" }, { "diff": "@@ -25,6 +25,7 @@\n import org.joda.time.DateTimeZone;\n import org.joda.time.format.ISODateTimeFormat;\n \n+import java.util.ArrayList;\n import java.util.concurrent.TimeUnit;\n \n import static org.hamcrest.Matchers.equalTo;\n@@ -147,21 +148,37 @@ public void testTimeUnitRoundingDST() {\n Rounding tzRounding;\n // testing savings to non savings switch\n tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID(\"UTC\")).build();\n- assertThat(tzRounding.round(time(\"2014-10-26T01:01:01\", DateTimeZone.forID(\"CET\"))),\n- equalTo(time(\"2014-10-26T01:00:00\", DateTimeZone.forID(\"CET\"))));\n+ assertThat(tzRounding.round(time(\"2014-10-26T01:01:01\", DateTimeZone.forOffsetHours(2))), // CEST = UTC+2\n+ equalTo(time(\"2014-10-26T01:00:00\", DateTimeZone.forOffsetHours(2))));\n+ assertThat(tzRounding.nextRoundingValue(time(\"2014-10-26T01:00:00\", DateTimeZone.forOffsetHours(2))),\n+ equalTo(time(\"2014-10-26T02:00:00\", DateTimeZone.forOffsetHours(2))));\n+ assertThat(tzRounding.nextRoundingValue(time(\"2014-10-26T02:00:00\", DateTimeZone.forOffsetHours(2))),\n+ equalTo(time(\"2014-10-26T03:00:00\", DateTimeZone.forOffsetHours(2))));\n \n tzRounding = 
TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID(\"CET\")).build();\n- assertThat(tzRounding.round(time(\"2014-10-26T01:01:01\", DateTimeZone.forID(\"CET\"))),\n- equalTo(time(\"2014-10-26T01:00:00\", DateTimeZone.forID(\"CET\"))));\n+ assertThat(tzRounding.round(time(\"2014-10-26T01:01:01\", DateTimeZone.forOffsetHours(2))), // CEST = UTC+2\n+ equalTo(time(\"2014-10-26T01:00:00\", DateTimeZone.forOffsetHours(2))));\n+ assertThat(tzRounding.nextRoundingValue(time(\"2014-10-26T01:00:00\", DateTimeZone.forOffsetHours(2))),\n+ equalTo(time(\"2014-10-26T02:00:00\", DateTimeZone.forOffsetHours(2))));\n+ assertThat(tzRounding.nextRoundingValue(time(\"2014-10-26T02:00:00\", DateTimeZone.forOffsetHours(2))),\n+ equalTo(time(\"2014-10-26T03:00:00\", DateTimeZone.forOffsetHours(2))));\n \n // testing non savings to savings switch\n tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID(\"UTC\")).build();\n- assertThat(tzRounding.round(time(\"2014-03-30T01:01:01\", DateTimeZone.forID(\"CET\"))),\n- equalTo(time(\"2014-03-30T01:00:00\", DateTimeZone.forID(\"CET\"))));\n+ assertThat(tzRounding.round(time(\"2014-03-30T01:01:01\", DateTimeZone.forOffsetHours(1))), // CET = UTC+1\n+ equalTo(time(\"2014-03-30T01:00:00\", DateTimeZone.forOffsetHours(1))));\n+ assertThat(tzRounding.nextRoundingValue(time(\"2014-03-30T01:00:00\", DateTimeZone.forOffsetHours(1))),\n+ equalTo(time(\"2014-03-30T02:00:00\", DateTimeZone.forOffsetHours(1))));\n+ assertThat(tzRounding.nextRoundingValue(time(\"2014-03-30T02:00:00\", DateTimeZone.forOffsetHours(1))),\n+ equalTo(time(\"2014-03-30T03:00:00\", DateTimeZone.forOffsetHours(1))));\n \n tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID(\"CET\")).build();\n- assertThat(tzRounding.round(time(\"2014-03-30T01:01:01\", DateTimeZone.forID(\"CET\"))),\n- equalTo(time(\"2014-03-30T01:00:00\", DateTimeZone.forID(\"CET\"))));\n+ assertThat(tzRounding.round(time(\"2014-03-30T01:01:01\", DateTimeZone.forOffsetHours(1))), // CET = UTC+1\n+ equalTo(time(\"2014-03-30T01:00:00\", DateTimeZone.forOffsetHours(1))));\n+ assertThat(tzRounding.nextRoundingValue(time(\"2014-03-30T01:00:00\", DateTimeZone.forOffsetHours(1))),\n+ equalTo(time(\"2014-03-30T02:00:00\", DateTimeZone.forOffsetHours(1))));\n+ assertThat(tzRounding.nextRoundingValue(time(\"2014-03-30T02:00:00\", DateTimeZone.forOffsetHours(1))),\n+ equalTo(time(\"2014-03-30T03:00:00\", DateTimeZone.forOffsetHours(1))));\n \n // testing non savings to savings switch (America/Chicago)\n tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID(\"UTC\")).build();\n@@ -210,6 +227,31 @@ public void testTimeZoneRoundingRandom() {\n }\n }\n \n+ /**\n+ * Test that nextRoundingValue() for hour rounding (and smaller) is equally spaced (see #18326)\n+ * Start at a random date in a random time zone, then find the next zone offset transition (if any).\n+ * From there, check that when we advance by using rounding#nextRoundingValue(), we always advance by the same\n+ * amount of milliseconds.\n+ */\n+ public void testSubHourNextRoundingEquallySpaced() {\n+ String timeZone = randomFrom(new ArrayList<>(DateTimeZone.getAvailableIDs()));\n+ DateTimeUnit unit = randomFrom(new DateTimeUnit[] { DateTimeUnit.HOUR_OF_DAY, DateTimeUnit.MINUTES_OF_HOUR,\n+ DateTimeUnit.SECOND_OF_MINUTE });\n+ DateTimeZone tz = DateTimeZone.forID(timeZone);\n+ TimeZoneRounding rounding = new TimeZoneRounding.TimeUnitRounding(unit, tz);\n+ 
// move the random date to transition for timezones that have offset change due to dst transition\n+ long nextTransition = tz.nextTransition(Math.abs(randomLong() % ((long) 10e11)));\n+ final long millisPerUnit = unit.field().getDurationField().getUnitMillis();\n+ // start ten units before transition\n+ long roundedDate = rounding.round(nextTransition - (10 * millisPerUnit));\n+ while (roundedDate < nextTransition + 10 * millisPerUnit) {\n+ long delta = rounding.nextRoundingValue(roundedDate) - roundedDate;\n+ assertEquals(\"Difference between rounded values not equally spaced for [\" + unit.name() + \"], [\" + timeZone + \"] at \"\n+ + new DateTime(roundedDate), millisPerUnit, delta);\n+ roundedDate = rounding.nextRoundingValue(roundedDate);\n+ }\n+ }\n+\n /**\n * randomized test on TimeIntervalRounding with random interval and time zone offsets\n */", "filename": "core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java", "status": "modified" }, { "diff": "@@ -24,6 +24,7 @@\n import org.elasticsearch.common.joda.Joda;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.index.mapper.core.DateFieldMapper;\n+import org.elasticsearch.index.query.MatchNoneQueryBuilder;\n import org.elasticsearch.index.query.QueryBuilders;\n import org.elasticsearch.plugins.Plugin;\n import org.elasticsearch.script.Script;\n@@ -1146,4 +1147,27 @@ public void testTimestampField() { // see #11692\n Histogram histo = response.getAggregations().get(\"histo\");\n assertThat(histo.getBuckets().size(), greaterThan(0));\n }\n+\n+ /**\n+ * When DST ends, local time turns back one hour, so between 2am and 4am wall time we should have four buckets:\n+ * \"2015-10-25T02:00:00.000+02:00\",\n+ * \"2015-10-25T02:00:00.000+01:00\",\n+ * \"2015-10-25T03:00:00.000+01:00\",\n+ * \"2015-10-25T04:00:00.000+01:00\".\n+ */\n+ public void testDSTEndTransition() throws Exception {\n+ SearchResponse response = client().prepareSearch(\"idx\")\n+ .setQuery(new MatchNoneQueryBuilder())\n+ .addAggregation(dateHistogram(\"histo\").field(\"date\").timeZone(DateTimeZone.forID(\"Europe/Oslo\"))\n+ .dateHistogramInterval(DateHistogramInterval.HOUR).minDocCount(0).extendedBounds(\n+ new ExtendedBounds(\"2015-10-25T02:00:00.000+02:00\", \"2015-10-25T04:00:00.000+01:00\")))\n+ .execute().actionGet();\n+\n+ Histogram histo = response.getAggregations().get(\"histo\");\n+ List<? extends Bucket> buckets = histo.getBuckets();\n+ assertThat(buckets.size(), equalTo(4));\n+ assertThat(((DateTime) buckets.get(1).getKey()).getMillis() - ((DateTime) buckets.get(0).getKey()).getMillis(), equalTo(3600000L));\n+ assertThat(((DateTime) buckets.get(2).getKey()).getMillis() - ((DateTime) buckets.get(1).getKey()).getMillis(), equalTo(3600000L));\n+ assertThat(((DateTime) buckets.get(3).getKey()).getMillis() - ((DateTime) buckets.get(2).getKey()).getMillis(), equalTo(3600000L));\n+ }\n }", "filename": "core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java", "status": "modified" } ] }
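The gist of the fix: for day-or-longer units the round trip through local time is still needed, but for hour and smaller units adding the plain duration keeps buckets equally spaced across a DST transition. A small standalone illustration using `java.time` (the production code uses Joda-Time) around the 2015-10-25 Europe/Oslo transition:

```java
import java.time.Duration;
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.ZonedDateTime;

public class DstRoundingDemo {

    public static void main(String[] args) {
        ZoneId oslo = ZoneId.of("Europe/Oslo");
        // bucket start at 2015-10-25T02:00+02:00, one wall-clock hour before the clocks fall back
        Instant bucketStart = ZonedDateTime.parse("2015-10-25T02:00:00+02:00[Europe/Oslo]").toInstant();

        // what the fix does for hour-and-smaller units: add the fixed duration on the UTC timeline
        Instant fixed = bucketStart.plus(Duration.ofHours(1));
        System.out.println(Duration.between(bucketStart, fixed));   // PT1H -> equally spaced buckets

        // what the old code effectively did: local wall-clock arithmetic, then convert back
        LocalDateTime localPlusOne = bucketStart.atZone(oslo).toLocalDateTime().plusHours(1); // 03:00 local
        Instant buggy = localPlusOne.atZone(oslo).toInstant();      // 03:00 resolves to offset +01:00
        System.out.println(Duration.between(bucketStart, buggy));   // PT2H -> the oversized first bucket
    }
}
```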
{ "body": "While working on indify string concats for painless, I noticed that some statements take veeery long to compile. Some background:\n\nIndy string concats only allow a maximum of 200 parts per invokedynamic. To test the code to allow more (using intermediate results), I added a simple statement with 211 items and let painless parse it:\n\n```\nString s = \"cat\"; return s + \"000\".toString() + \"001\".toString() + ... + \"209\".toString();\n```\n\nThis takes on painless on my computer up to 300 seconds. If I remove the `.toString()` calls, it gets a bit faster, but the result is that strings are already merged while compiling (so the `.toString()` is just a workaround to prevent concats on compilation: I did not find a test for that, its cool that painless does this!). It gets even slower if you instead cast every single string to `(def)`, its still works and works and works since 1000s.\n\nLooks like there is some exponential runtime problem.\n\nThis is my test code:\n\n``` java\n public void testAppendMany() {\n StringBuilder script = new StringBuilder(\"String s = \\\"cat\\\"; return s\");\n StringBuilder result = new StringBuilder(\"cat\");\n for (int i = 0; i < WriterConstants.MAX_INDY_STRING_CONCAT_ARGS + 10; i++) {\n final String s = String.format(Locale.ROOT, \"%03d\", i);\n script.append(\" + '\").append(s).append(\"'.toString()\");\n result.append(s);\n }\n System.out.println(Debugger.toString(script.toString()));\n //assertEquals(result.toString(), exec(script.toString()));\n }\n```\n", "comments": [ { "body": "FYI, the resulting bytecode with Java 9 and indyfied string concat looks like - (I will open a PR about that soon!):\n\n```\n public execute(Ljava/util/Map;Lorg/apache/lucene/search/Scorer;Lorg/elasticsearch/search/lookup/LeafDocLookup;Ljava/lang/Object;)Ljava/lang/Object;\n L0\n LINENUMBER 1 L0\n LDC \"cat\"\n ASTORE 5\n L1\n LINENUMBER 1 L1\n ALOAD 5\n LDC \"000\"\n INVOKEVIRTUAL java/lang/String.toString ()Ljava/lang/String;\n LDC \"001\"\n INVOKEVIRTUAL java/lang/String.toString ()Ljava/lang/String;\n LDC \"002\"\n[...]\n LDC \"198\"\n INVOKEVIRTUAL java/lang/String.toString ()Ljava/lang/String;\n INVOKEDYNAMIC 
concat(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Lj
ava/lang/String;Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; [\n // handle kind 0x6 : INVOKESTATIC\n java/lang/invoke/StringConcatFactory.makeConcat(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/CallSite;\n // arguments: none\n ]\n LDC \"199\"\n INVOKEVIRTUAL java/lang/String.toString ()Ljava/lang/String;\n LDC \"200\"\n INVOKEVIRTUAL java/lang/String.toString ()Ljava/lang/String;\n LDC \"201\"\n INVOKEVIRTUAL java/lang/String.toString ()Ljava/lang/String;\n LDC \"202\"\n INVOKEVIRTUAL java/lang/String.toString ()Ljava/lang/String;\n LDC \"203\"\n INVOKEVIRTUAL java/lang/String.toString ()Ljava/lang/String;\n LDC \"204\"\n INVOKEVIRTUAL java/lang/String.toString ()Ljava/lang/String;\n LDC \"205\"\n INVOKEVIRTUAL java/lang/String.toString ()Ljava/lang/String;\n LDC \"206\"\n INVOKEVIRTUAL java/lang/String.toString ()Ljava/lang/String;\n LDC \"207\"\n INVOKEVIRTUAL java/lang/String.toString ()Ljava/lang/String;\n LDC \"208\"\n INVOKEVIRTUAL java/lang/String.toString ()Ljava/lang/String;\n LDC \"209\"\n INVOKEVIRTUAL java/lang/String.toString ()Ljava/lang/String;\n INVOKEDYNAMIC concat(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; [\n // handle kind 0x6 : INVOKESTATIC\n java/lang/invoke/StringConcatFactory.makeConcat(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/CallSite;\n // arguments: none\n ]\n ARETURN\n MAXSTACK = 200\n MAXLOCALS = 6\n```\n", "created_at": "2016-05-17T11:27:13Z" }, { "body": "@clintongormley this is a bug not an enhancement!\n", "created_at": "2016-05-17T11:33:52Z" }, { "body": "Dutifully updated :)\n", "created_at": "2016-05-17T11:34:42Z" }, { "body": "Thanks!\n", "created_at": "2016-05-17T11:35:35Z" }, { "body": "I ran profiler and it looks to me the issue is caused by the grammar. I added DiagnosticErrorListener and ambiguity detection and it prints:\n\n```\nline 1:18 reportAttemptingFullContext d=15 (statement), input='Strings=\"cat\";return'\nline 1:16 reportAmbiguity d=15 (statement): ambigAlts={5, 11}, input='Strings=\"cat\";'\nline 1:4016 reportAttemptingFullContext d=28 (expression), input='+'000'.toString()+'001'.toString()+'002'.toString()+...+'209.toString()\n```\n\nThe \"...\" are mine but it shows all 210 strings.\n", "created_at": "2016-05-17T13:35:14Z" }, { "body": "The test also runs in 3-5 seconds instead of hundreds of seconds if i set SLL prediction mode. I don't think we should solve it that way (even though tests pass), instead we should figure out how to fix the ambiguity...\n", "created_at": "2016-05-17T13:47:21Z" }, { "body": "The issue is caused by making semicolons optional everywhere in the grammar. Fixing that makes the test run instantly.\n", "created_at": "2016-05-17T13:59:16Z" }, { "body": "Also by fixing the semicolon issue, i get no more ambiguity warnings with this hack. So this is the real bad guy. I will make a PR to fix this. We can make semicolons mandatory and be fast. If we want to make them optional in some way, we should do it in a way that is not a performance killer, or we don't do it. 
This isn't groovy.\n\n```\ndiff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java\nindex 4f6e2f5..c776245 100644\n--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java\n+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java\n@@ -21,7 +21,9 @@ package org.elasticsearch.painless.antlr;\n\n import org.antlr.v4.runtime.ANTLRInputStream;\n import org.antlr.v4.runtime.CommonTokenStream;\n+import org.antlr.v4.runtime.DiagnosticErrorListener;\n import org.antlr.v4.runtime.ParserRuleContext;\n+import org.antlr.v4.runtime.atn.PredictionMode;\n import org.elasticsearch.painless.Operation;\n import org.elasticsearch.painless.Variables.Reserved;\n import org.elasticsearch.painless.antlr.PainlessParser.AfterthoughtContext;\n@@ -139,8 +141,10 @@ public final class Walker extends PainlessParserBaseVisitor<ANode> {\n final PainlessParser parser = new PainlessParser(new CommonTokenStream(lexer));\n final ParserErrorStrategy strategy = new ParserErrorStrategy();\n\n- lexer.removeErrorListeners();\n- parser.removeErrorListeners();\n+ //lexer.removeErrorListeners();\n+ //parser.removeErrorListeners();\n+ parser.addErrorListener(new DiagnosticErrorListener());\n+ parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);\n parser.setErrorHandler(strategy);\n\n return parser.source();\n```\n", "created_at": "2016-05-17T14:02:53Z" }, { "body": "Thanks for reporting this @uschindler !\n", "created_at": "2016-05-17T16:20:35Z" } ], "number": 18398, "title": "Compilation horrible slow on some huge statements" }
{ "body": "The issue is caused by too much ambiguity in the grammar, particularly the fact that semicolons aren't needed to separate statements.\n\nInstead, just make the semicolon optional at the very end (in other words, `EOF` is equivalent). This means that very simple one liner cases like `5` or `_score + log(doc['pagerank'].value)` or whatever do not need semicolons. But you can't do `int x = y int z = x + 5 return z` anymore. I really don't think we should be allowing that anyway.\n\nCloses #18398\n", "number": 18410, "review_comments": [], "title": "Fix insanely slow compilation" }
{ "commits": [ { "message": "make semicolons only optional at the end" }, { "message": "fix example in doc" } ], "files": [ { "diff": "@@ -189,7 +189,7 @@ POST hockey/player/1/_update\n {\n \"script\": {\n \"lang\": \"painless\",\n- \"inline\": \"ctx._source.last = params.last ctx._source.nick = params.nick\",\n+ \"inline\": \"ctx._source.last = params.last; ctx._source.nick = params.nick\",\n \"params\": {\n \"last\": \"gaudreau\",\n \"nick\": \"hockey\"", "filename": "docs/reference/modules/scripting/painless.asciidoc", "status": "modified" }, { "diff": "@@ -28,15 +28,15 @@ source\n statement\n : IF LP expression RP block ( ELSE block )? # if\n | WHILE LP expression RP ( block | empty ) # while\n- | DO block WHILE LP expression RP SEMICOLON? # do\n+ | DO block WHILE LP expression RP ( SEMICOLON | EOF ) # do\n | FOR LP initializer? SEMICOLON expression? SEMICOLON afterthought? RP ( block | empty ) # for\n- | declaration SEMICOLON? # decl\n- | CONTINUE SEMICOLON? # continue\n- | BREAK SEMICOLON? # break\n- | RETURN expression SEMICOLON? # return\n+ | declaration ( SEMICOLON | EOF ) # decl\n+ | CONTINUE ( SEMICOLON | EOF ) # continue\n+ | BREAK ( SEMICOLON | EOF ) # break\n+ | RETURN expression ( SEMICOLON | EOF ) # return\n | TRY block trap+ # try\n- | THROW expression SEMICOLON? # throw\n- | expression SEMICOLON? # expr\n+ | THROW expression ( SEMICOLON | EOF ) # throw\n+ | expression ( SEMICOLON | EOF ) # expr\n ;\n \n block", "filename": "modules/lang-painless/src/main/antlr/PainlessParser.g4", "status": "modified" }, { "diff": "@@ -182,6 +182,7 @@ public DeclarationContext declaration() {\n return getRuleContext(DeclarationContext.class,0);\n }\n public TerminalNode SEMICOLON() { return getToken(PainlessParser.SEMICOLON, 0); }\n+ public TerminalNode EOF() { return getToken(PainlessParser.EOF, 0); }\n public DeclContext(StatementContext ctx) { copyFrom(ctx); }\n @Override\n public <T> T accept(ParseTreeVisitor<? extends T> visitor) {\n@@ -192,6 +193,7 @@ public <T> T accept(ParseTreeVisitor<? extends T> visitor) {\n public static class BreakContext extends StatementContext {\n public TerminalNode BREAK() { return getToken(PainlessParser.BREAK, 0); }\n public TerminalNode SEMICOLON() { return getToken(PainlessParser.SEMICOLON, 0); }\n+ public TerminalNode EOF() { return getToken(PainlessParser.EOF, 0); }\n public BreakContext(StatementContext ctx) { copyFrom(ctx); }\n @Override\n public <T> T accept(ParseTreeVisitor<? extends T> visitor) {\n@@ -205,6 +207,7 @@ public ExpressionContext expression() {\n return getRuleContext(ExpressionContext.class,0);\n }\n public TerminalNode SEMICOLON() { return getToken(PainlessParser.SEMICOLON, 0); }\n+ public TerminalNode EOF() { return getToken(PainlessParser.EOF, 0); }\n public ThrowContext(StatementContext ctx) { copyFrom(ctx); }\n @Override\n public <T> T accept(ParseTreeVisitor<? extends T> visitor) {\n@@ -215,6 +218,7 @@ public <T> T accept(ParseTreeVisitor<? extends T> visitor) {\n public static class ContinueContext extends StatementContext {\n public TerminalNode CONTINUE() { return getToken(PainlessParser.CONTINUE, 0); }\n public TerminalNode SEMICOLON() { return getToken(PainlessParser.SEMICOLON, 0); }\n+ public TerminalNode EOF() { return getToken(PainlessParser.EOF, 0); }\n public ContinueContext(StatementContext ctx) { copyFrom(ctx); }\n @Override\n public <T> T accept(ParseTreeVisitor<? 
extends T> visitor) {\n@@ -275,6 +279,7 @@ public ExpressionContext expression() {\n return getRuleContext(ExpressionContext.class,0);\n }\n public TerminalNode SEMICOLON() { return getToken(PainlessParser.SEMICOLON, 0); }\n+ public TerminalNode EOF() { return getToken(PainlessParser.EOF, 0); }\n public ExprContext(StatementContext ctx) { copyFrom(ctx); }\n @Override\n public <T> T accept(ParseTreeVisitor<? extends T> visitor) {\n@@ -294,6 +299,7 @@ public ExpressionContext expression() {\n }\n public TerminalNode RP() { return getToken(PainlessParser.RP, 0); }\n public TerminalNode SEMICOLON() { return getToken(PainlessParser.SEMICOLON, 0); }\n+ public TerminalNode EOF() { return getToken(PainlessParser.EOF, 0); }\n public DoContext(StatementContext ctx) { copyFrom(ctx); }\n @Override\n public <T> T accept(ParseTreeVisitor<? extends T> visitor) {\n@@ -348,6 +354,7 @@ public ExpressionContext expression() {\n return getRuleContext(ExpressionContext.class,0);\n }\n public TerminalNode SEMICOLON() { return getToken(PainlessParser.SEMICOLON, 0); }\n+ public TerminalNode EOF() { return getToken(PainlessParser.EOF, 0); }\n public ReturnContext(StatementContext ctx) { copyFrom(ctx); }\n @Override\n public <T> T accept(ParseTreeVisitor<? extends T> visitor) {\n@@ -362,8 +369,8 @@ public final StatementContext statement() throws RecognitionException {\n int _la;\n try {\n int _alt;\n- setState(134);\n- switch ( getInterpreter().adaptivePredict(_input,15,_ctx) ) {\n+ setState(125);\n+ switch ( getInterpreter().adaptivePredict(_input,8,_ctx) ) {\n case 1:\n _localctx = new IfContext(_localctx);\n enterOuterAlt(_localctx, 1);\n@@ -436,69 +443,67 @@ public final StatementContext statement() throws RecognitionException {\n expression(0);\n setState(79);\n match(RP);\n- setState(81);\n+ setState(80);\n _la = _input.LA(1);\n- if (_la==SEMICOLON) {\n- {\n- setState(80);\n- match(SEMICOLON);\n- }\n+ if ( !(_la==EOF || _la==SEMICOLON) ) {\n+ _errHandler.recoverInline(this);\n+ } else {\n+ consume();\n }\n-\n }\n break;\n case 4:\n _localctx = new ForContext(_localctx);\n enterOuterAlt(_localctx, 4);\n {\n- setState(83);\n+ setState(82);\n match(FOR);\n- setState(84);\n+ setState(83);\n match(LP);\n- setState(86);\n+ setState(85);\n _la = _input.LA(1);\n if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR) | (1L << OCTAL))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (HEX - 64)) | (1L << (INTEGER - 64)) | (1L << (DECIMAL - 64)) | (1L << (STRING - 64)) | (1L << (TRUE - 64)) | (1L << (FALSE - 64)) | (1L << (NULL - 64)) | (1L << (ID - 64)))) != 0)) {\n {\n- setState(85);\n+ setState(84);\n initializer();\n }\n }\n \n- setState(88);\n+ setState(87);\n match(SEMICOLON);\n- setState(90);\n+ setState(89);\n _la = _input.LA(1);\n if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR) | (1L << OCTAL))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (HEX - 64)) | (1L << (INTEGER - 64)) | (1L << (DECIMAL - 64)) | (1L << (STRING - 64)) | (1L << (TRUE - 64)) | (1L << (FALSE - 64)) | (1L << (NULL - 64)) | (1L << (ID - 64)))) != 0)) {\n {\n- setState(89);\n+ setState(88);\n expression(0);\n }\n }\n \n- setState(92);\n+ setState(91);\n match(SEMICOLON);\n- setState(94);\n+ setState(93);\n _la = _input.LA(1);\n if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << 
LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR) | (1L << OCTAL))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (HEX - 64)) | (1L << (INTEGER - 64)) | (1L << (DECIMAL - 64)) | (1L << (STRING - 64)) | (1L << (TRUE - 64)) | (1L << (FALSE - 64)) | (1L << (NULL - 64)) | (1L << (ID - 64)))) != 0)) {\n {\n- setState(93);\n+ setState(92);\n afterthought();\n }\n }\n \n- setState(96);\n+ setState(95);\n match(RP);\n- setState(99);\n- switch ( getInterpreter().adaptivePredict(_input,7,_ctx) ) {\n+ setState(98);\n+ switch ( getInterpreter().adaptivePredict(_input,6,_ctx) ) {\n case 1:\n {\n- setState(97);\n+ setState(96);\n block();\n }\n break;\n case 2:\n {\n- setState(98);\n+ setState(97);\n empty();\n }\n break;\n@@ -509,136 +514,124 @@ public final StatementContext statement() throws RecognitionException {\n _localctx = new DeclContext(_localctx);\n enterOuterAlt(_localctx, 5);\n {\n- setState(101);\n+ setState(100);\n declaration();\n- setState(103);\n+ setState(101);\n _la = _input.LA(1);\n- if (_la==SEMICOLON) {\n- {\n- setState(102);\n- match(SEMICOLON);\n- }\n+ if ( !(_la==EOF || _la==SEMICOLON) ) {\n+ _errHandler.recoverInline(this);\n+ } else {\n+ consume();\n }\n-\n }\n break;\n case 6:\n _localctx = new ContinueContext(_localctx);\n enterOuterAlt(_localctx, 6);\n {\n- setState(105);\n+ setState(103);\n match(CONTINUE);\n- setState(107);\n+ setState(104);\n _la = _input.LA(1);\n- if (_la==SEMICOLON) {\n- {\n- setState(106);\n- match(SEMICOLON);\n- }\n+ if ( !(_la==EOF || _la==SEMICOLON) ) {\n+ _errHandler.recoverInline(this);\n+ } else {\n+ consume();\n }\n-\n }\n break;\n case 7:\n _localctx = new BreakContext(_localctx);\n enterOuterAlt(_localctx, 7);\n {\n- setState(109);\n+ setState(105);\n match(BREAK);\n- setState(111);\n+ setState(106);\n _la = _input.LA(1);\n- if (_la==SEMICOLON) {\n- {\n- setState(110);\n- match(SEMICOLON);\n- }\n+ if ( !(_la==EOF || _la==SEMICOLON) ) {\n+ _errHandler.recoverInline(this);\n+ } else {\n+ consume();\n }\n-\n }\n break;\n case 8:\n _localctx = new ReturnContext(_localctx);\n enterOuterAlt(_localctx, 8);\n {\n- setState(113);\n+ setState(107);\n match(RETURN);\n- setState(114);\n+ setState(108);\n expression(0);\n- setState(116);\n+ setState(109);\n _la = _input.LA(1);\n- if (_la==SEMICOLON) {\n- {\n- setState(115);\n- match(SEMICOLON);\n- }\n+ if ( !(_la==EOF || _la==SEMICOLON) ) {\n+ _errHandler.recoverInline(this);\n+ } else {\n+ consume();\n }\n-\n }\n break;\n case 9:\n _localctx = new TryContext(_localctx);\n enterOuterAlt(_localctx, 9);\n {\n- setState(118);\n+ setState(111);\n match(TRY);\n- setState(119);\n+ setState(112);\n block();\n- setState(121); \n+ setState(114); \n _errHandler.sync(this);\n _alt = 1;\n do {\n switch (_alt) {\n case 1:\n {\n {\n- setState(120);\n+ setState(113);\n trap();\n }\n }\n break;\n default:\n throw new NoViableAltException(this);\n }\n- setState(123); \n+ setState(116); \n _errHandler.sync(this);\n- _alt = getInterpreter().adaptivePredict(_input,12,_ctx);\n+ _alt = getInterpreter().adaptivePredict(_input,7,_ctx);\n } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER );\n }\n break;\n case 10:\n _localctx = new ThrowContext(_localctx);\n enterOuterAlt(_localctx, 10);\n {\n- setState(125);\n+ setState(118);\n match(THROW);\n- setState(126);\n+ setState(119);\n expression(0);\n- setState(128);\n+ setState(120);\n _la = _input.LA(1);\n- if (_la==SEMICOLON) {\n- {\n- setState(127);\n- 
match(SEMICOLON);\n- }\n+ if ( !(_la==EOF || _la==SEMICOLON) ) {\n+ _errHandler.recoverInline(this);\n+ } else {\n+ consume();\n }\n-\n }\n break;\n case 11:\n _localctx = new ExprContext(_localctx);\n enterOuterAlt(_localctx, 11);\n {\n- setState(130);\n+ setState(122);\n expression(0);\n- setState(132);\n+ setState(123);\n _la = _input.LA(1);\n- if (_la==SEMICOLON) {\n- {\n- setState(131);\n- match(SEMICOLON);\n- }\n+ if ( !(_la==EOF || _la==SEMICOLON) ) {\n+ _errHandler.recoverInline(this);\n+ } else {\n+ consume();\n }\n-\n }\n break;\n }\n@@ -698,29 +691,29 @@ public final BlockContext block() throws RecognitionException {\n enterRule(_localctx, 4, RULE_block);\n int _la;\n try {\n- setState(145);\n+ setState(136);\n switch (_input.LA(1)) {\n case LBRACK:\n _localctx = new MultipleContext(_localctx);\n enterOuterAlt(_localctx, 1);\n {\n- setState(136);\n+ setState(127);\n match(LBRACK);\n- setState(138); \n+ setState(129); \n _errHandler.sync(this);\n _la = _input.LA(1);\n do {\n {\n {\n- setState(137);\n+ setState(128);\n statement();\n }\n }\n- setState(140); \n+ setState(131); \n _errHandler.sync(this);\n _la = _input.LA(1);\n } while ( (((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LP) | (1L << IF) | (1L << WHILE) | (1L << DO) | (1L << FOR) | (1L << CONTINUE) | (1L << BREAK) | (1L << RETURN) | (1L << NEW) | (1L << TRY) | (1L << THROW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR) | (1L << OCTAL))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (HEX - 64)) | (1L << (INTEGER - 64)) | (1L << (DECIMAL - 64)) | (1L << (STRING - 64)) | (1L << (TRUE - 64)) | (1L << (FALSE - 64)) | (1L << (NULL - 64)) | (1L << (ID - 64)))) != 0) );\n- setState(142);\n+ setState(133);\n match(RBRACK);\n }\n break;\n@@ -753,7 +746,7 @@ public final BlockContext block() throws RecognitionException {\n _localctx = new SingleContext(_localctx);\n enterOuterAlt(_localctx, 2);\n {\n- setState(144);\n+ setState(135);\n statement();\n }\n break;\n@@ -792,19 +785,19 @@ public final EmptyContext empty() throws RecognitionException {\n EmptyContext _localctx = new EmptyContext(_ctx, getState());\n enterRule(_localctx, 6, RULE_empty);\n try {\n- setState(149);\n+ setState(140);\n switch (_input.LA(1)) {\n case LBRACK:\n enterOuterAlt(_localctx, 1);\n {\n- setState(147);\n+ setState(138);\n emptyscope();\n }\n break;\n case SEMICOLON:\n enterOuterAlt(_localctx, 2);\n {\n- setState(148);\n+ setState(139);\n match(SEMICOLON);\n }\n break;\n@@ -843,9 +836,9 @@ public final EmptyscopeContext emptyscope() throws RecognitionException {\n try {\n enterOuterAlt(_localctx, 1);\n {\n- setState(151);\n+ setState(142);\n match(LBRACK);\n- setState(152);\n+ setState(143);\n match(RBRACK);\n }\n }\n@@ -882,19 +875,19 @@ public final InitializerContext initializer() throws RecognitionException {\n InitializerContext _localctx = new InitializerContext(_ctx, getState());\n enterRule(_localctx, 10, RULE_initializer);\n try {\n- setState(156);\n- switch ( getInterpreter().adaptivePredict(_input,19,_ctx) ) {\n+ setState(147);\n+ switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) {\n case 1:\n enterOuterAlt(_localctx, 1);\n {\n- setState(154);\n+ setState(145);\n declaration();\n }\n break;\n case 2:\n enterOuterAlt(_localctx, 2);\n {\n- setState(155);\n+ setState(146);\n expression(0);\n }\n break;\n@@ -932,7 +925,7 @@ public final AfterthoughtContext afterthought() throws RecognitionException {\n try {\n enterOuterAlt(_localctx, 1);\n {\n- setState(158);\n+ 
setState(149);\n expression(0);\n }\n }\n@@ -979,23 +972,23 @@ public final DeclarationContext declaration() throws RecognitionException {\n try {\n enterOuterAlt(_localctx, 1);\n {\n- setState(160);\n+ setState(151);\n decltype();\n- setState(161);\n+ setState(152);\n declvar();\n- setState(166);\n+ setState(157);\n _errHandler.sync(this);\n _la = _input.LA(1);\n while (_la==COMMA) {\n {\n {\n- setState(162);\n+ setState(153);\n match(COMMA);\n- setState(163);\n+ setState(154);\n declvar();\n }\n }\n- setState(168);\n+ setState(159);\n _errHandler.sync(this);\n _la = _input.LA(1);\n }\n@@ -1042,21 +1035,21 @@ public final DecltypeContext decltype() throws RecognitionException {\n try {\n enterOuterAlt(_localctx, 1);\n {\n- setState(169);\n+ setState(160);\n identifier();\n- setState(174);\n+ setState(165);\n _errHandler.sync(this);\n _la = _input.LA(1);\n while (_la==LBRACE) {\n {\n {\n- setState(170);\n+ setState(161);\n match(LBRACE);\n- setState(171);\n+ setState(162);\n match(RBRACE);\n }\n }\n- setState(176);\n+ setState(167);\n _errHandler.sync(this);\n _la = _input.LA(1);\n }\n@@ -1099,15 +1092,15 @@ public final DeclvarContext declvar() throws RecognitionException {\n try {\n enterOuterAlt(_localctx, 1);\n {\n- setState(177);\n+ setState(168);\n identifier();\n- setState(180);\n+ setState(171);\n _la = _input.LA(1);\n if (_la==ASSIGN) {\n {\n- setState(178);\n+ setState(169);\n match(ASSIGN);\n- setState(179);\n+ setState(170);\n expression(0);\n }\n }\n@@ -1158,29 +1151,29 @@ public final TrapContext trap() throws RecognitionException {\n try {\n enterOuterAlt(_localctx, 1);\n {\n- setState(182);\n+ setState(173);\n match(CATCH);\n- setState(183);\n+ setState(174);\n match(LP);\n {\n- setState(184);\n+ setState(175);\n identifier();\n- setState(185);\n+ setState(176);\n identifier();\n }\n- setState(187);\n+ setState(178);\n match(RP);\n- setState(190);\n- switch ( getInterpreter().adaptivePredict(_input,23,_ctx) ) {\n+ setState(181);\n+ switch ( getInterpreter().adaptivePredict(_input,16,_ctx) ) {\n case 1:\n {\n- setState(188);\n+ setState(179);\n block();\n }\n break;\n case 2:\n {\n- setState(189);\n+ setState(180);\n emptyscope();\n }\n break;\n@@ -1220,13 +1213,13 @@ public final IdentifierContext identifier() throws RecognitionException {\n try {\n enterOuterAlt(_localctx, 1);\n {\n- setState(192);\n+ setState(183);\n match(ID);\n- setState(194);\n- switch ( getInterpreter().adaptivePredict(_input,24,_ctx) ) {\n+ setState(185);\n+ switch ( getInterpreter().adaptivePredict(_input,17,_ctx) ) {\n case 1:\n {\n- setState(193);\n+ setState(184);\n generic();\n }\n break;\n@@ -1275,27 +1268,27 @@ public final GenericContext generic() throws RecognitionException {\n try {\n enterOuterAlt(_localctx, 1);\n {\n- setState(196);\n+ setState(187);\n match(LT);\n- setState(197);\n+ setState(188);\n identifier();\n- setState(202);\n+ setState(193);\n _errHandler.sync(this);\n _la = _input.LA(1);\n while (_la==COMMA) {\n {\n {\n- setState(198);\n+ setState(189);\n match(COMMA);\n- setState(199);\n+ setState(190);\n identifier();\n }\n }\n- setState(204);\n+ setState(195);\n _errHandler.sync(this);\n _la = _input.LA(1);\n }\n- setState(205);\n+ setState(196);\n match(GT);\n }\n }\n@@ -1563,22 +1556,22 @@ private ExpressionContext expression(int _p) throws RecognitionException {\n int _alt;\n enterOuterAlt(_localctx, 1);\n {\n- setState(233);\n- switch ( getInterpreter().adaptivePredict(_input,26,_ctx) ) {\n+ setState(224);\n+ switch ( getInterpreter().adaptivePredict(_input,19,_ctx) ) {\n 
case 1:\n {\n _localctx = new UnaryContext(_localctx);\n _ctx = _localctx;\n _prevctx = _localctx;\n \n- setState(208);\n+ setState(199);\n _la = _input.LA(1);\n if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB))) != 0)) ) {\n _errHandler.recoverInline(this);\n } else {\n consume();\n }\n- setState(209);\n+ setState(200);\n expression(14);\n }\n break;\n@@ -1587,13 +1580,13 @@ private ExpressionContext expression(int _p) throws RecognitionException {\n _localctx = new CastContext(_localctx);\n _ctx = _localctx;\n _prevctx = _localctx;\n- setState(210);\n+ setState(201);\n match(LP);\n- setState(211);\n+ setState(202);\n decltype();\n- setState(212);\n+ setState(203);\n match(RP);\n- setState(213);\n+ setState(204);\n expression(13);\n }\n break;\n@@ -1602,16 +1595,16 @@ private ExpressionContext expression(int _p) throws RecognitionException {\n _localctx = new AssignmentContext(_localctx);\n _ctx = _localctx;\n _prevctx = _localctx;\n- setState(215);\n+ setState(206);\n chain();\n- setState(216);\n+ setState(207);\n _la = _input.LA(1);\n if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ASSIGN) | (1L << AADD) | (1L << ASUB) | (1L << AMUL) | (1L << ADIV) | (1L << AREM) | (1L << AAND) | (1L << AXOR) | (1L << AOR) | (1L << ALSH) | (1L << ARSH) | (1L << AUSH))) != 0)) ) {\n _errHandler.recoverInline(this);\n } else {\n consume();\n }\n- setState(217);\n+ setState(208);\n expression(1);\n }\n break;\n@@ -1620,11 +1613,11 @@ private ExpressionContext expression(int _p) throws RecognitionException {\n _localctx = new PrecedenceContext(_localctx);\n _ctx = _localctx;\n _prevctx = _localctx;\n- setState(219);\n+ setState(210);\n match(LP);\n- setState(220);\n+ setState(211);\n expression(0);\n- setState(221);\n+ setState(212);\n match(RP);\n }\n break;\n@@ -1633,7 +1626,7 @@ private ExpressionContext expression(int _p) throws RecognitionException {\n _localctx = new NumericContext(_localctx);\n _ctx = _localctx;\n _prevctx = _localctx;\n- setState(223);\n+ setState(214);\n _la = _input.LA(1);\n if ( !(((((_la - 63)) & ~0x3f) == 0 && ((1L << (_la - 63)) & ((1L << (OCTAL - 63)) | (1L << (HEX - 63)) | (1L << (INTEGER - 63)) | (1L << (DECIMAL - 63)))) != 0)) ) {\n _errHandler.recoverInline(this);\n@@ -1647,7 +1640,7 @@ private ExpressionContext expression(int _p) throws RecognitionException {\n _localctx = new TrueContext(_localctx);\n _ctx = _localctx;\n _prevctx = _localctx;\n- setState(224);\n+ setState(215);\n match(TRUE);\n }\n break;\n@@ -1656,7 +1649,7 @@ private ExpressionContext expression(int _p) throws RecognitionException {\n _localctx = new FalseContext(_localctx);\n _ctx = _localctx;\n _prevctx = _localctx;\n- setState(225);\n+ setState(216);\n match(FALSE);\n }\n break;\n@@ -1665,7 +1658,7 @@ private ExpressionContext expression(int _p) throws RecognitionException {\n _localctx = new NullContext(_localctx);\n _ctx = _localctx;\n _prevctx = _localctx;\n- setState(226);\n+ setState(217);\n match(NULL);\n }\n break;\n@@ -1674,9 +1667,9 @@ private ExpressionContext expression(int _p) throws RecognitionException {\n _localctx = new PostincContext(_localctx);\n _ctx = _localctx;\n _prevctx = _localctx;\n- setState(227);\n+ setState(218);\n chain();\n- setState(228);\n+ setState(219);\n _la = _input.LA(1);\n if ( !(_la==INCR || _la==DECR) ) {\n _errHandler.recoverInline(this);\n@@ -1690,14 +1683,14 @@ private ExpressionContext expression(int _p) throws RecognitionException {\n _localctx = new PreincContext(_localctx);\n _ctx = 
_localctx;\n _prevctx = _localctx;\n- setState(230);\n+ setState(221);\n _la = _input.LA(1);\n if ( !(_la==INCR || _la==DECR) ) {\n _errHandler.recoverInline(this);\n } else {\n consume();\n }\n- setState(231);\n+ setState(222);\n chain();\n }\n break;\n@@ -1706,189 +1699,189 @@ private ExpressionContext expression(int _p) throws RecognitionException {\n _localctx = new ReadContext(_localctx);\n _ctx = _localctx;\n _prevctx = _localctx;\n- setState(232);\n+ setState(223);\n chain();\n }\n break;\n }\n _ctx.stop = _input.LT(-1);\n- setState(273);\n+ setState(264);\n _errHandler.sync(this);\n- _alt = getInterpreter().adaptivePredict(_input,28,_ctx);\n+ _alt = getInterpreter().adaptivePredict(_input,21,_ctx);\n while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {\n if ( _alt==1 ) {\n if ( _parseListeners!=null ) triggerExitRuleEvent();\n _prevctx = _localctx;\n {\n- setState(271);\n- switch ( getInterpreter().adaptivePredict(_input,27,_ctx) ) {\n+ setState(262);\n+ switch ( getInterpreter().adaptivePredict(_input,20,_ctx) ) {\n case 1:\n {\n _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState));\n pushNewRecursionContext(_localctx, _startState, RULE_expression);\n- setState(235);\n+ setState(226);\n if (!(precpred(_ctx, 12))) throw new FailedPredicateException(this, \"precpred(_ctx, 12)\");\n- setState(236);\n+ setState(227);\n _la = _input.LA(1);\n if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << MUL) | (1L << DIV) | (1L << REM))) != 0)) ) {\n _errHandler.recoverInline(this);\n } else {\n consume();\n }\n- setState(237);\n+ setState(228);\n expression(13);\n }\n break;\n case 2:\n {\n _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState));\n pushNewRecursionContext(_localctx, _startState, RULE_expression);\n- setState(238);\n+ setState(229);\n if (!(precpred(_ctx, 11))) throw new FailedPredicateException(this, \"precpred(_ctx, 11)\");\n- setState(239);\n+ setState(230);\n _la = _input.LA(1);\n if ( !(_la==ADD || _la==SUB) ) {\n _errHandler.recoverInline(this);\n } else {\n consume();\n }\n- setState(240);\n+ setState(231);\n expression(12);\n }\n break;\n case 3:\n {\n _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState));\n pushNewRecursionContext(_localctx, _startState, RULE_expression);\n- setState(241);\n+ setState(232);\n if (!(precpred(_ctx, 10))) throw new FailedPredicateException(this, \"precpred(_ctx, 10)\");\n- setState(242);\n+ setState(233);\n _la = _input.LA(1);\n if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LSH) | (1L << RSH) | (1L << USH))) != 0)) ) {\n _errHandler.recoverInline(this);\n } else {\n consume();\n }\n- setState(243);\n+ setState(234);\n expression(11);\n }\n break;\n case 4:\n {\n _localctx = new CompContext(new ExpressionContext(_parentctx, _parentState));\n pushNewRecursionContext(_localctx, _startState, RULE_expression);\n- setState(244);\n+ setState(235);\n if (!(precpred(_ctx, 9))) throw new FailedPredicateException(this, \"precpred(_ctx, 9)\");\n- setState(245);\n+ setState(236);\n _la = _input.LA(1);\n if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LT) | (1L << LTE) | (1L << GT) | (1L << GTE))) != 0)) ) {\n _errHandler.recoverInline(this);\n } else {\n consume();\n }\n- setState(246);\n+ setState(237);\n expression(10);\n }\n break;\n case 5:\n {\n _localctx = new CompContext(new ExpressionContext(_parentctx, _parentState));\n pushNewRecursionContext(_localctx, _startState, RULE_expression);\n- setState(247);\n+ setState(238);\n if 
(!(precpred(_ctx, 8))) throw new FailedPredicateException(this, \"precpred(_ctx, 8)\");\n- setState(248);\n+ setState(239);\n _la = _input.LA(1);\n if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << EQ) | (1L << EQR) | (1L << NE) | (1L << NER))) != 0)) ) {\n _errHandler.recoverInline(this);\n } else {\n consume();\n }\n- setState(249);\n+ setState(240);\n expression(9);\n }\n break;\n case 6:\n {\n _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState));\n pushNewRecursionContext(_localctx, _startState, RULE_expression);\n- setState(250);\n+ setState(241);\n if (!(precpred(_ctx, 7))) throw new FailedPredicateException(this, \"precpred(_ctx, 7)\");\n- setState(251);\n+ setState(242);\n match(BWAND);\n- setState(252);\n+ setState(243);\n expression(8);\n }\n break;\n case 7:\n {\n _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState));\n pushNewRecursionContext(_localctx, _startState, RULE_expression);\n- setState(253);\n+ setState(244);\n if (!(precpred(_ctx, 6))) throw new FailedPredicateException(this, \"precpred(_ctx, 6)\");\n- setState(254);\n+ setState(245);\n match(XOR);\n- setState(255);\n+ setState(246);\n expression(7);\n }\n break;\n case 8:\n {\n _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState));\n pushNewRecursionContext(_localctx, _startState, RULE_expression);\n- setState(256);\n+ setState(247);\n if (!(precpred(_ctx, 5))) throw new FailedPredicateException(this, \"precpred(_ctx, 5)\");\n- setState(257);\n+ setState(248);\n match(BWOR);\n- setState(258);\n+ setState(249);\n expression(6);\n }\n break;\n case 9:\n {\n _localctx = new BoolContext(new ExpressionContext(_parentctx, _parentState));\n pushNewRecursionContext(_localctx, _startState, RULE_expression);\n- setState(259);\n+ setState(250);\n if (!(precpred(_ctx, 4))) throw new FailedPredicateException(this, \"precpred(_ctx, 4)\");\n- setState(260);\n+ setState(251);\n match(BOOLAND);\n- setState(261);\n+ setState(252);\n expression(5);\n }\n break;\n case 10:\n {\n _localctx = new BoolContext(new ExpressionContext(_parentctx, _parentState));\n pushNewRecursionContext(_localctx, _startState, RULE_expression);\n- setState(262);\n+ setState(253);\n if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, \"precpred(_ctx, 3)\");\n- setState(263);\n+ setState(254);\n match(BOOLOR);\n- setState(264);\n+ setState(255);\n expression(4);\n }\n break;\n case 11:\n {\n _localctx = new ConditionalContext(new ExpressionContext(_parentctx, _parentState));\n pushNewRecursionContext(_localctx, _startState, RULE_expression);\n- setState(265);\n+ setState(256);\n if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, \"precpred(_ctx, 2)\");\n- setState(266);\n+ setState(257);\n match(COND);\n- setState(267);\n+ setState(258);\n expression(0);\n- setState(268);\n+ setState(259);\n match(COLON);\n- setState(269);\n+ setState(260);\n expression(2);\n }\n break;\n }\n } \n }\n- setState(275);\n+ setState(266);\n _errHandler.sync(this);\n- _alt = getInterpreter().adaptivePredict(_input,28,_ctx);\n+ _alt = getInterpreter().adaptivePredict(_input,21,_ctx);\n }\n }\n }\n@@ -1934,40 +1927,40 @@ public final ChainContext chain() throws RecognitionException {\n ChainContext _localctx = new ChainContext(_ctx, getState());\n enterRule(_localctx, 28, RULE_chain);\n try {\n- setState(281);\n- switch ( getInterpreter().adaptivePredict(_input,29,_ctx) ) {\n+ setState(272);\n+ switch ( getInterpreter().adaptivePredict(_input,22,_ctx) ) {\n case 1:\n 
enterOuterAlt(_localctx, 1);\n {\n- setState(276);\n+ setState(267);\n linkprec();\n }\n break;\n case 2:\n enterOuterAlt(_localctx, 2);\n {\n- setState(277);\n+ setState(268);\n linkcast();\n }\n break;\n case 3:\n enterOuterAlt(_localctx, 3);\n {\n- setState(278);\n+ setState(269);\n linkvar();\n }\n break;\n case 4:\n enterOuterAlt(_localctx, 4);\n {\n- setState(279);\n+ setState(270);\n linknew();\n }\n break;\n case 5:\n enterOuterAlt(_localctx, 5);\n {\n- setState(280);\n+ setState(271);\n linkstring();\n }\n break;\n@@ -2025,54 +2018,54 @@ public final LinkprecContext linkprec() throws RecognitionException {\n try {\n enterOuterAlt(_localctx, 1);\n {\n- setState(283);\n+ setState(274);\n match(LP);\n- setState(289);\n- switch ( getInterpreter().adaptivePredict(_input,30,_ctx) ) {\n+ setState(280);\n+ switch ( getInterpreter().adaptivePredict(_input,23,_ctx) ) {\n case 1:\n {\n- setState(284);\n+ setState(275);\n linkprec();\n }\n break;\n case 2:\n {\n- setState(285);\n+ setState(276);\n linkcast();\n }\n break;\n case 3:\n {\n- setState(286);\n+ setState(277);\n linkvar();\n }\n break;\n case 4:\n {\n- setState(287);\n+ setState(278);\n linknew();\n }\n break;\n case 5:\n {\n- setState(288);\n+ setState(279);\n linkstring();\n }\n break;\n }\n- setState(291);\n+ setState(282);\n match(RP);\n- setState(294);\n- switch ( getInterpreter().adaptivePredict(_input,31,_ctx) ) {\n+ setState(285);\n+ switch ( getInterpreter().adaptivePredict(_input,24,_ctx) ) {\n case 1:\n {\n- setState(292);\n+ setState(283);\n linkdot();\n }\n break;\n case 2:\n {\n- setState(293);\n+ setState(284);\n linkbrace();\n }\n break;\n@@ -2128,41 +2121,41 @@ public final LinkcastContext linkcast() throws RecognitionException {\n try {\n enterOuterAlt(_localctx, 1);\n {\n- setState(296);\n+ setState(287);\n match(LP);\n- setState(297);\n+ setState(288);\n decltype();\n- setState(298);\n+ setState(289);\n match(RP);\n- setState(304);\n- switch ( getInterpreter().adaptivePredict(_input,32,_ctx) ) {\n+ setState(295);\n+ switch ( getInterpreter().adaptivePredict(_input,25,_ctx) ) {\n case 1:\n {\n- setState(299);\n+ setState(290);\n linkprec();\n }\n break;\n case 2:\n {\n- setState(300);\n+ setState(291);\n linkcast();\n }\n break;\n case 3:\n {\n- setState(301);\n+ setState(292);\n linkvar();\n }\n break;\n case 4:\n {\n- setState(302);\n+ setState(293);\n linknew();\n }\n break;\n case 5:\n {\n- setState(303);\n+ setState(294);\n linkstring();\n }\n break;\n@@ -2209,23 +2202,23 @@ public final LinkbraceContext linkbrace() throws RecognitionException {\n try {\n enterOuterAlt(_localctx, 1);\n {\n- setState(306);\n+ setState(297);\n match(LBRACE);\n- setState(307);\n+ setState(298);\n expression(0);\n- setState(308);\n+ setState(299);\n match(RBRACE);\n- setState(311);\n- switch ( getInterpreter().adaptivePredict(_input,33,_ctx) ) {\n+ setState(302);\n+ switch ( getInterpreter().adaptivePredict(_input,26,_ctx) ) {\n case 1:\n {\n- setState(309);\n+ setState(300);\n linkdot();\n }\n break;\n case 2:\n {\n- setState(310);\n+ setState(301);\n linkbrace();\n }\n break;\n@@ -2268,19 +2261,19 @@ public final LinkdotContext linkdot() throws RecognitionException {\n try {\n enterOuterAlt(_localctx, 1);\n {\n- setState(313);\n+ setState(304);\n match(DOT);\n- setState(316);\n- switch ( getInterpreter().adaptivePredict(_input,34,_ctx) ) {\n+ setState(307);\n+ switch ( getInterpreter().adaptivePredict(_input,27,_ctx) ) {\n case 1:\n {\n- setState(314);\n+ setState(305);\n linkcall();\n }\n break;\n case 2:\n {\n- 
setState(315);\n+ setState(306);\n linkfield();\n }\n break;\n@@ -2326,21 +2319,21 @@ public final LinkcallContext linkcall() throws RecognitionException {\n try {\n enterOuterAlt(_localctx, 1);\n {\n- setState(318);\n+ setState(309);\n match(EXTID);\n- setState(319);\n+ setState(310);\n arguments();\n- setState(322);\n- switch ( getInterpreter().adaptivePredict(_input,35,_ctx) ) {\n+ setState(313);\n+ switch ( getInterpreter().adaptivePredict(_input,28,_ctx) ) {\n case 1:\n {\n- setState(320);\n+ setState(311);\n linkdot();\n }\n break;\n case 2:\n {\n- setState(321);\n+ setState(312);\n linkbrace();\n }\n break;\n@@ -2385,19 +2378,19 @@ public final LinkvarContext linkvar() throws RecognitionException {\n try {\n enterOuterAlt(_localctx, 1);\n {\n- setState(324);\n+ setState(315);\n identifier();\n- setState(327);\n- switch ( getInterpreter().adaptivePredict(_input,36,_ctx) ) {\n+ setState(318);\n+ switch ( getInterpreter().adaptivePredict(_input,29,_ctx) ) {\n case 1:\n {\n- setState(325);\n+ setState(316);\n linkdot();\n }\n break;\n case 2:\n {\n- setState(326);\n+ setState(317);\n linkbrace();\n }\n break;\n@@ -2442,24 +2435,24 @@ public final LinkfieldContext linkfield() throws RecognitionException {\n try {\n enterOuterAlt(_localctx, 1);\n {\n- setState(329);\n+ setState(320);\n _la = _input.LA(1);\n if ( !(_la==EXTINTEGER || _la==EXTID) ) {\n _errHandler.recoverInline(this);\n } else {\n consume();\n }\n- setState(332);\n- switch ( getInterpreter().adaptivePredict(_input,37,_ctx) ) {\n+ setState(323);\n+ switch ( getInterpreter().adaptivePredict(_input,30,_ctx) ) {\n case 1:\n {\n- setState(330);\n+ setState(321);\n linkdot();\n }\n break;\n case 2:\n {\n- setState(331);\n+ setState(322);\n linkbrace();\n }\n break;\n@@ -2520,22 +2513,22 @@ public final LinknewContext linknew() throws RecognitionException {\n int _alt;\n enterOuterAlt(_localctx, 1);\n {\n- setState(334);\n+ setState(325);\n match(NEW);\n- setState(335);\n+ setState(326);\n identifier();\n- setState(351);\n+ setState(342);\n switch (_input.LA(1)) {\n case LP:\n {\n {\n- setState(336);\n+ setState(327);\n arguments();\n- setState(338);\n- switch ( getInterpreter().adaptivePredict(_input,38,_ctx) ) {\n+ setState(329);\n+ switch ( getInterpreter().adaptivePredict(_input,31,_ctx) ) {\n case 1:\n {\n- setState(337);\n+ setState(328);\n linkdot();\n }\n break;\n@@ -2546,35 +2539,35 @@ public final LinknewContext linknew() throws RecognitionException {\n case LBRACE:\n {\n {\n- setState(344); \n+ setState(335); \n _errHandler.sync(this);\n _alt = 1;\n do {\n switch (_alt) {\n case 1:\n {\n {\n- setState(340);\n+ setState(331);\n match(LBRACE);\n- setState(341);\n+ setState(332);\n expression(0);\n- setState(342);\n+ setState(333);\n match(RBRACE);\n }\n }\n break;\n default:\n throw new NoViableAltException(this);\n }\n- setState(346); \n+ setState(337); \n _errHandler.sync(this);\n- _alt = getInterpreter().adaptivePredict(_input,39,_ctx);\n+ _alt = getInterpreter().adaptivePredict(_input,32,_ctx);\n } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER );\n- setState(349);\n- switch ( getInterpreter().adaptivePredict(_input,40,_ctx) ) {\n+ setState(340);\n+ switch ( getInterpreter().adaptivePredict(_input,33,_ctx) ) {\n case 1:\n {\n- setState(348);\n+ setState(339);\n linkdot();\n }\n break;\n@@ -2623,19 +2616,19 @@ public final LinkstringContext linkstring() throws RecognitionException {\n try {\n enterOuterAlt(_localctx, 1);\n {\n- setState(353);\n+ setState(344);\n match(STRING);\n- 
setState(356);\n- switch ( getInterpreter().adaptivePredict(_input,42,_ctx) ) {\n+ setState(347);\n+ switch ( getInterpreter().adaptivePredict(_input,35,_ctx) ) {\n case 1:\n {\n- setState(354);\n+ setState(345);\n linkdot();\n }\n break;\n case 2:\n {\n- setState(355);\n+ setState(346);\n linkbrace();\n }\n break;\n@@ -2685,34 +2678,34 @@ public final ArgumentsContext arguments() throws RecognitionException {\n enterOuterAlt(_localctx, 1);\n {\n {\n- setState(358);\n+ setState(349);\n match(LP);\n- setState(367);\n+ setState(358);\n _la = _input.LA(1);\n if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR) | (1L << OCTAL))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (HEX - 64)) | (1L << (INTEGER - 64)) | (1L << (DECIMAL - 64)) | (1L << (STRING - 64)) | (1L << (TRUE - 64)) | (1L << (FALSE - 64)) | (1L << (NULL - 64)) | (1L << (ID - 64)))) != 0)) {\n {\n- setState(359);\n+ setState(350);\n expression(0);\n- setState(364);\n+ setState(355);\n _errHandler.sync(this);\n _la = _input.LA(1);\n while (_la==COMMA) {\n {\n {\n- setState(360);\n+ setState(351);\n match(COMMA);\n- setState(361);\n+ setState(352);\n expression(0);\n }\n }\n- setState(366);\n+ setState(357);\n _errHandler.sync(this);\n _la = _input.LA(1);\n }\n }\n }\n \n- setState(369);\n+ setState(360);\n match(RP);\n }\n }\n@@ -2764,150 +2757,145 @@ private boolean expression_sempred(ExpressionContext _localctx, int predIndex) {\n }\n \n public static final String _serializedATN =\n- \"\\3\\u0430\\ud6d1\\u8206\\uad2d\\u4417\\uaef1\\u8d80\\uaadd\\3K\\u0176\\4\\2\\t\\2\\4\"+\n+ \"\\3\\u0430\\ud6d1\\u8206\\uad2d\\u4417\\uaef1\\u8d80\\uaadd\\3K\\u016d\\4\\2\\t\\2\\4\"+\n \"\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\\t\\7\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\"+\n \"\\13\\4\\f\\t\\f\\4\\r\\t\\r\\4\\16\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\"+\n \"\\4\\23\\t\\23\\4\\24\\t\\24\\4\\25\\t\\25\\4\\26\\t\\26\\4\\27\\t\\27\\4\\30\\t\\30\\4\\31\\t\\31\"+\n \"\\4\\32\\t\\32\\3\\2\\6\\2\\66\\n\\2\\r\\2\\16\\2\\67\\3\\2\\3\\2\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\"+\n \"\\3\\3\\5\\3C\\n\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\5\\3K\\n\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\"+\n- \"\\3\\3\\5\\3T\\n\\3\\3\\3\\3\\3\\3\\3\\5\\3Y\\n\\3\\3\\3\\3\\3\\5\\3]\\n\\3\\3\\3\\3\\3\\5\\3a\\n\\3\\3\"+\n- \"\\3\\3\\3\\3\\3\\5\\3f\\n\\3\\3\\3\\3\\3\\5\\3j\\n\\3\\3\\3\\3\\3\\5\\3n\\n\\3\\3\\3\\3\\3\\5\\3r\\n\\3\"+\n- \"\\3\\3\\3\\3\\3\\3\\5\\3w\\n\\3\\3\\3\\3\\3\\3\\3\\6\\3|\\n\\3\\r\\3\\16\\3}\\3\\3\\3\\3\\3\\3\\5\\3\\u0083\"+\n- \"\\n\\3\\3\\3\\3\\3\\5\\3\\u0087\\n\\3\\5\\3\\u0089\\n\\3\\3\\4\\3\\4\\6\\4\\u008d\\n\\4\\r\\4\\16\"+\n- \"\\4\\u008e\\3\\4\\3\\4\\3\\4\\5\\4\\u0094\\n\\4\\3\\5\\3\\5\\5\\5\\u0098\\n\\5\\3\\6\\3\\6\\3\\6\\3\"+\n- \"\\7\\3\\7\\5\\7\\u009f\\n\\7\\3\\b\\3\\b\\3\\t\\3\\t\\3\\t\\3\\t\\7\\t\\u00a7\\n\\t\\f\\t\\16\\t\\u00aa\"+\n- \"\\13\\t\\3\\n\\3\\n\\3\\n\\7\\n\\u00af\\n\\n\\f\\n\\16\\n\\u00b2\\13\\n\\3\\13\\3\\13\\3\\13\\5\\13\"+\n- \"\\u00b7\\n\\13\\3\\f\\3\\f\\3\\f\\3\\f\\3\\f\\3\\f\\3\\f\\3\\f\\5\\f\\u00c1\\n\\f\\3\\r\\3\\r\\5\\r\"+\n- \"\\u00c5\\n\\r\\3\\16\\3\\16\\3\\16\\3\\16\\7\\16\\u00cb\\n\\16\\f\\16\\16\\16\\u00ce\\13\\16\"+\n- \"\\3\\16\\3\\16\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\"+\n+ \"\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\5\\3X\\n\\3\\3\\3\\3\\3\\5\\3\\\\\\n\\3\\3\\3\\3\\3\\5\\3`\\n\\3\\3\\3\\3\"+\n+ 
\"\\3\\3\\3\\5\\3e\\n\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\"+\n+ \"\\3\\6\\3u\\n\\3\\r\\3\\16\\3v\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\5\\3\\u0080\\n\\3\\3\\4\\3\"+\n+ \"\\4\\6\\4\\u0084\\n\\4\\r\\4\\16\\4\\u0085\\3\\4\\3\\4\\3\\4\\5\\4\\u008b\\n\\4\\3\\5\\3\\5\\5\\5\"+\n+ \"\\u008f\\n\\5\\3\\6\\3\\6\\3\\6\\3\\7\\3\\7\\5\\7\\u0096\\n\\7\\3\\b\\3\\b\\3\\t\\3\\t\\3\\t\\3\\t\\7\"+\n+ \"\\t\\u009e\\n\\t\\f\\t\\16\\t\\u00a1\\13\\t\\3\\n\\3\\n\\3\\n\\7\\n\\u00a6\\n\\n\\f\\n\\16\\n\\u00a9\"+\n+ \"\\13\\n\\3\\13\\3\\13\\3\\13\\5\\13\\u00ae\\n\\13\\3\\f\\3\\f\\3\\f\\3\\f\\3\\f\\3\\f\\3\\f\\3\\f\\5\"+\n+ \"\\f\\u00b8\\n\\f\\3\\r\\3\\r\\5\\r\\u00bc\\n\\r\\3\\16\\3\\16\\3\\16\\3\\16\\7\\16\\u00c2\\n\\16\"+\n+ \"\\f\\16\\16\\16\\u00c5\\13\\16\\3\\16\\3\\16\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\"+\n+ \"\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\"+\n+ \"\\17\\3\\17\\3\\17\\3\\17\\3\\17\\5\\17\\u00e3\\n\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\"+\n \"\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\"+\n- \"\\5\\17\\u00ec\\n\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\"+\n \"\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\"+\n- \"\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\7\\17\\u0112\\n\\17\"+\n- \"\\f\\17\\16\\17\\u0115\\13\\17\\3\\20\\3\\20\\3\\20\\3\\20\\3\\20\\5\\20\\u011c\\n\\20\\3\\21\"+\n- \"\\3\\21\\3\\21\\3\\21\\3\\21\\3\\21\\5\\21\\u0124\\n\\21\\3\\21\\3\\21\\3\\21\\5\\21\\u0129\\n\"+\n- \"\\21\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\5\\22\\u0133\\n\\22\\3\\23\\3\\23\"+\n- \"\\3\\23\\3\\23\\3\\23\\5\\23\\u013a\\n\\23\\3\\24\\3\\24\\3\\24\\5\\24\\u013f\\n\\24\\3\\25\\3\"+\n- \"\\25\\3\\25\\3\\25\\5\\25\\u0145\\n\\25\\3\\26\\3\\26\\3\\26\\5\\26\\u014a\\n\\26\\3\\27\\3\\27\"+\n- \"\\3\\27\\5\\27\\u014f\\n\\27\\3\\30\\3\\30\\3\\30\\3\\30\\5\\30\\u0155\\n\\30\\3\\30\\3\\30\\3\"+\n- \"\\30\\3\\30\\6\\30\\u015b\\n\\30\\r\\30\\16\\30\\u015c\\3\\30\\5\\30\\u0160\\n\\30\\5\\30\\u0162\"+\n- \"\\n\\30\\3\\31\\3\\31\\3\\31\\5\\31\\u0167\\n\\31\\3\\32\\3\\32\\3\\32\\3\\32\\7\\32\\u016d\\n\"+\n- \"\\32\\f\\32\\16\\32\\u0170\\13\\32\\5\\32\\u0172\\n\\32\\3\\32\\3\\32\\3\\32\\2\\3\\34\\33\\2\"+\n- \"\\4\\6\\b\\n\\f\\16\\20\\22\\24\\26\\30\\32\\34\\36 \\\"$&(*,.\\60\\62\\2\\f\\4\\2\\32\\33\\37\"+\n- \" \\3\\2\\65@\\3\\2AD\\3\\2\\63\\64\\3\\2\\34\\36\\3\\2\\37 \\3\\2!#\\3\\2$\\'\\3\\2(+\\3\\2JK\\u01b3\"+\n- \"\\2\\65\\3\\2\\2\\2\\4\\u0088\\3\\2\\2\\2\\6\\u0093\\3\\2\\2\\2\\b\\u0097\\3\\2\\2\\2\\n\\u0099\"+\n- \"\\3\\2\\2\\2\\f\\u009e\\3\\2\\2\\2\\16\\u00a0\\3\\2\\2\\2\\20\\u00a2\\3\\2\\2\\2\\22\\u00ab\\3\"+\n- \"\\2\\2\\2\\24\\u00b3\\3\\2\\2\\2\\26\\u00b8\\3\\2\\2\\2\\30\\u00c2\\3\\2\\2\\2\\32\\u00c6\\3\\2\"+\n- \"\\2\\2\\34\\u00eb\\3\\2\\2\\2\\36\\u011b\\3\\2\\2\\2 \\u011d\\3\\2\\2\\2\\\"\\u012a\\3\\2\\2\\2\"+\n- \"$\\u0134\\3\\2\\2\\2&\\u013b\\3\\2\\2\\2(\\u0140\\3\\2\\2\\2*\\u0146\\3\\2\\2\\2,\\u014b\\3\"+\n- \"\\2\\2\\2.\\u0150\\3\\2\\2\\2\\60\\u0163\\3\\2\\2\\2\\62\\u0168\\3\\2\\2\\2\\64\\66\\5\\4\\3\\2\"+\n- \"\\65\\64\\3\\2\\2\\2\\66\\67\\3\\2\\2\\2\\67\\65\\3\\2\\2\\2\\678\\3\\2\\2\\289\\3\\2\\2\\29:\\7\\2\"+\n- \"\\2\\3:\\3\\3\\2\\2\\2;<\\7\\16\\2\\2<=\\7\\t\\2\\2=>\\5\\34\\17\\2>?\\7\\n\\2\\2?B\\5\\6\\4\\2@\"+\n- 
\"A\\7\\17\\2\\2AC\\5\\6\\4\\2B@\\3\\2\\2\\2BC\\3\\2\\2\\2C\\u0089\\3\\2\\2\\2DE\\7\\20\\2\\2EF\\7\"+\n- \"\\t\\2\\2FG\\5\\34\\17\\2GJ\\7\\n\\2\\2HK\\5\\6\\4\\2IK\\5\\b\\5\\2JH\\3\\2\\2\\2JI\\3\\2\\2\\2K\"+\n- \"\\u0089\\3\\2\\2\\2LM\\7\\21\\2\\2MN\\5\\6\\4\\2NO\\7\\20\\2\\2OP\\7\\t\\2\\2PQ\\5\\34\\17\\2Q\"+\n- \"S\\7\\n\\2\\2RT\\7\\r\\2\\2SR\\3\\2\\2\\2ST\\3\\2\\2\\2T\\u0089\\3\\2\\2\\2UV\\7\\22\\2\\2VX\\7\"+\n- \"\\t\\2\\2WY\\5\\f\\7\\2XW\\3\\2\\2\\2XY\\3\\2\\2\\2YZ\\3\\2\\2\\2Z\\\\\\7\\r\\2\\2[]\\5\\34\\17\\2\"+\n- \"\\\\[\\3\\2\\2\\2\\\\]\\3\\2\\2\\2]^\\3\\2\\2\\2^`\\7\\r\\2\\2_a\\5\\16\\b\\2`_\\3\\2\\2\\2`a\\3\\2\"+\n- \"\\2\\2ab\\3\\2\\2\\2be\\7\\n\\2\\2cf\\5\\6\\4\\2df\\5\\b\\5\\2ec\\3\\2\\2\\2ed\\3\\2\\2\\2f\\u0089\"+\n- \"\\3\\2\\2\\2gi\\5\\20\\t\\2hj\\7\\r\\2\\2ih\\3\\2\\2\\2ij\\3\\2\\2\\2j\\u0089\\3\\2\\2\\2km\\7\\23\"+\n- \"\\2\\2ln\\7\\r\\2\\2ml\\3\\2\\2\\2mn\\3\\2\\2\\2n\\u0089\\3\\2\\2\\2oq\\7\\24\\2\\2pr\\7\\r\\2\\2\"+\n- \"qp\\3\\2\\2\\2qr\\3\\2\\2\\2r\\u0089\\3\\2\\2\\2st\\7\\25\\2\\2tv\\5\\34\\17\\2uw\\7\\r\\2\\2v\"+\n- \"u\\3\\2\\2\\2vw\\3\\2\\2\\2w\\u0089\\3\\2\\2\\2xy\\7\\27\\2\\2y{\\5\\6\\4\\2z|\\5\\26\\f\\2{z\\3\"+\n- \"\\2\\2\\2|}\\3\\2\\2\\2}{\\3\\2\\2\\2}~\\3\\2\\2\\2~\\u0089\\3\\2\\2\\2\\177\\u0080\\7\\31\\2\\2\"+\n- \"\\u0080\\u0082\\5\\34\\17\\2\\u0081\\u0083\\7\\r\\2\\2\\u0082\\u0081\\3\\2\\2\\2\\u0082\\u0083\"+\n- \"\\3\\2\\2\\2\\u0083\\u0089\\3\\2\\2\\2\\u0084\\u0086\\5\\34\\17\\2\\u0085\\u0087\\7\\r\\2\\2\"+\n- \"\\u0086\\u0085\\3\\2\\2\\2\\u0086\\u0087\\3\\2\\2\\2\\u0087\\u0089\\3\\2\\2\\2\\u0088;\\3\"+\n- \"\\2\\2\\2\\u0088D\\3\\2\\2\\2\\u0088L\\3\\2\\2\\2\\u0088U\\3\\2\\2\\2\\u0088g\\3\\2\\2\\2\\u0088\"+\n- \"k\\3\\2\\2\\2\\u0088o\\3\\2\\2\\2\\u0088s\\3\\2\\2\\2\\u0088x\\3\\2\\2\\2\\u0088\\177\\3\\2\\2\"+\n- \"\\2\\u0088\\u0084\\3\\2\\2\\2\\u0089\\5\\3\\2\\2\\2\\u008a\\u008c\\7\\5\\2\\2\\u008b\\u008d\"+\n- \"\\5\\4\\3\\2\\u008c\\u008b\\3\\2\\2\\2\\u008d\\u008e\\3\\2\\2\\2\\u008e\\u008c\\3\\2\\2\\2\\u008e\"+\n- \"\\u008f\\3\\2\\2\\2\\u008f\\u0090\\3\\2\\2\\2\\u0090\\u0091\\7\\6\\2\\2\\u0091\\u0094\\3\\2\"+\n- \"\\2\\2\\u0092\\u0094\\5\\4\\3\\2\\u0093\\u008a\\3\\2\\2\\2\\u0093\\u0092\\3\\2\\2\\2\\u0094\"+\n- \"\\7\\3\\2\\2\\2\\u0095\\u0098\\5\\n\\6\\2\\u0096\\u0098\\7\\r\\2\\2\\u0097\\u0095\\3\\2\\2\\2\"+\n- \"\\u0097\\u0096\\3\\2\\2\\2\\u0098\\t\\3\\2\\2\\2\\u0099\\u009a\\7\\5\\2\\2\\u009a\\u009b\\7\"+\n- \"\\6\\2\\2\\u009b\\13\\3\\2\\2\\2\\u009c\\u009f\\5\\20\\t\\2\\u009d\\u009f\\5\\34\\17\\2\\u009e\"+\n- \"\\u009c\\3\\2\\2\\2\\u009e\\u009d\\3\\2\\2\\2\\u009f\\r\\3\\2\\2\\2\\u00a0\\u00a1\\5\\34\\17\"+\n- \"\\2\\u00a1\\17\\3\\2\\2\\2\\u00a2\\u00a3\\5\\22\\n\\2\\u00a3\\u00a8\\5\\24\\13\\2\\u00a4\\u00a5\"+\n- \"\\7\\f\\2\\2\\u00a5\\u00a7\\5\\24\\13\\2\\u00a6\\u00a4\\3\\2\\2\\2\\u00a7\\u00aa\\3\\2\\2\\2\"+\n- \"\\u00a8\\u00a6\\3\\2\\2\\2\\u00a8\\u00a9\\3\\2\\2\\2\\u00a9\\21\\3\\2\\2\\2\\u00aa\\u00a8\"+\n- \"\\3\\2\\2\\2\\u00ab\\u00b0\\5\\30\\r\\2\\u00ac\\u00ad\\7\\7\\2\\2\\u00ad\\u00af\\7\\b\\2\\2\"+\n- \"\\u00ae\\u00ac\\3\\2\\2\\2\\u00af\\u00b2\\3\\2\\2\\2\\u00b0\\u00ae\\3\\2\\2\\2\\u00b0\\u00b1\"+\n- \"\\3\\2\\2\\2\\u00b1\\23\\3\\2\\2\\2\\u00b2\\u00b0\\3\\2\\2\\2\\u00b3\\u00b6\\5\\30\\r\\2\\u00b4\"+\n- \"\\u00b5\\7\\65\\2\\2\\u00b5\\u00b7\\5\\34\\17\\2\\u00b6\\u00b4\\3\\2\\2\\2\\u00b6\\u00b7\"+\n- \"\\3\\2\\2\\2\\u00b7\\25\\3\\2\\2\\2\\u00b8\\u00b9\\7\\30\\2\\2\\u00b9\\u00ba\\7\\t\\2\\2\\u00ba\"+\n- \"\\u00bb\\5\\30\\r\\2\\u00bb\\u00bc\\5\\30\\r\\2\\u00bc\\u00bd\\3\\2\\2\\2\\u00bd\\u00c0\\7\"+\n- 
\"\\n\\2\\2\\u00be\\u00c1\\5\\6\\4\\2\\u00bf\\u00c1\\5\\n\\6\\2\\u00c0\\u00be\\3\\2\\2\\2\\u00c0\"+\n- \"\\u00bf\\3\\2\\2\\2\\u00c1\\27\\3\\2\\2\\2\\u00c2\\u00c4\\7I\\2\\2\\u00c3\\u00c5\\5\\32\\16\"+\n- \"\\2\\u00c4\\u00c3\\3\\2\\2\\2\\u00c4\\u00c5\\3\\2\\2\\2\\u00c5\\31\\3\\2\\2\\2\\u00c6\\u00c7\"+\n- \"\\7$\\2\\2\\u00c7\\u00cc\\5\\30\\r\\2\\u00c8\\u00c9\\7\\f\\2\\2\\u00c9\\u00cb\\5\\30\\r\\2\"+\n- \"\\u00ca\\u00c8\\3\\2\\2\\2\\u00cb\\u00ce\\3\\2\\2\\2\\u00cc\\u00ca\\3\\2\\2\\2\\u00cc\\u00cd\"+\n- \"\\3\\2\\2\\2\\u00cd\\u00cf\\3\\2\\2\\2\\u00ce\\u00cc\\3\\2\\2\\2\\u00cf\\u00d0\\7&\\2\\2\\u00d0\"+\n- \"\\33\\3\\2\\2\\2\\u00d1\\u00d2\\b\\17\\1\\2\\u00d2\\u00d3\\t\\2\\2\\2\\u00d3\\u00ec\\5\\34\"+\n- \"\\17\\20\\u00d4\\u00d5\\7\\t\\2\\2\\u00d5\\u00d6\\5\\22\\n\\2\\u00d6\\u00d7\\7\\n\\2\\2\\u00d7\"+\n- \"\\u00d8\\5\\34\\17\\17\\u00d8\\u00ec\\3\\2\\2\\2\\u00d9\\u00da\\5\\36\\20\\2\\u00da\\u00db\"+\n- \"\\t\\3\\2\\2\\u00db\\u00dc\\5\\34\\17\\3\\u00dc\\u00ec\\3\\2\\2\\2\\u00dd\\u00de\\7\\t\\2\\2\"+\n- \"\\u00de\\u00df\\5\\34\\17\\2\\u00df\\u00e0\\7\\n\\2\\2\\u00e0\\u00ec\\3\\2\\2\\2\\u00e1\\u00ec\"+\n- \"\\t\\4\\2\\2\\u00e2\\u00ec\\7F\\2\\2\\u00e3\\u00ec\\7G\\2\\2\\u00e4\\u00ec\\7H\\2\\2\\u00e5\"+\n- \"\\u00e6\\5\\36\\20\\2\\u00e6\\u00e7\\t\\5\\2\\2\\u00e7\\u00ec\\3\\2\\2\\2\\u00e8\\u00e9\\t\"+\n- \"\\5\\2\\2\\u00e9\\u00ec\\5\\36\\20\\2\\u00ea\\u00ec\\5\\36\\20\\2\\u00eb\\u00d1\\3\\2\\2\\2\"+\n- \"\\u00eb\\u00d4\\3\\2\\2\\2\\u00eb\\u00d9\\3\\2\\2\\2\\u00eb\\u00dd\\3\\2\\2\\2\\u00eb\\u00e1\"+\n- \"\\3\\2\\2\\2\\u00eb\\u00e2\\3\\2\\2\\2\\u00eb\\u00e3\\3\\2\\2\\2\\u00eb\\u00e4\\3\\2\\2\\2\\u00eb\"+\n- \"\\u00e5\\3\\2\\2\\2\\u00eb\\u00e8\\3\\2\\2\\2\\u00eb\\u00ea\\3\\2\\2\\2\\u00ec\\u0113\\3\\2\"+\n- \"\\2\\2\\u00ed\\u00ee\\f\\16\\2\\2\\u00ee\\u00ef\\t\\6\\2\\2\\u00ef\\u0112\\5\\34\\17\\17\\u00f0\"+\n- \"\\u00f1\\f\\r\\2\\2\\u00f1\\u00f2\\t\\7\\2\\2\\u00f2\\u0112\\5\\34\\17\\16\\u00f3\\u00f4\"+\n- \"\\f\\f\\2\\2\\u00f4\\u00f5\\t\\b\\2\\2\\u00f5\\u0112\\5\\34\\17\\r\\u00f6\\u00f7\\f\\13\\2\"+\n- \"\\2\\u00f7\\u00f8\\t\\t\\2\\2\\u00f8\\u0112\\5\\34\\17\\f\\u00f9\\u00fa\\f\\n\\2\\2\\u00fa\"+\n- \"\\u00fb\\t\\n\\2\\2\\u00fb\\u0112\\5\\34\\17\\13\\u00fc\\u00fd\\f\\t\\2\\2\\u00fd\\u00fe\"+\n- \"\\7,\\2\\2\\u00fe\\u0112\\5\\34\\17\\n\\u00ff\\u0100\\f\\b\\2\\2\\u0100\\u0101\\7-\\2\\2\\u0101\"+\n- \"\\u0112\\5\\34\\17\\t\\u0102\\u0103\\f\\7\\2\\2\\u0103\\u0104\\7.\\2\\2\\u0104\\u0112\\5\"+\n- \"\\34\\17\\b\\u0105\\u0106\\f\\6\\2\\2\\u0106\\u0107\\7/\\2\\2\\u0107\\u0112\\5\\34\\17\\7\"+\n- \"\\u0108\\u0109\\f\\5\\2\\2\\u0109\\u010a\\7\\60\\2\\2\\u010a\\u0112\\5\\34\\17\\6\\u010b\"+\n- \"\\u010c\\f\\4\\2\\2\\u010c\\u010d\\7\\61\\2\\2\\u010d\\u010e\\5\\34\\17\\2\\u010e\\u010f\"+\n- \"\\7\\62\\2\\2\\u010f\\u0110\\5\\34\\17\\4\\u0110\\u0112\\3\\2\\2\\2\\u0111\\u00ed\\3\\2\\2\"+\n- \"\\2\\u0111\\u00f0\\3\\2\\2\\2\\u0111\\u00f3\\3\\2\\2\\2\\u0111\\u00f6\\3\\2\\2\\2\\u0111\\u00f9\"+\n- \"\\3\\2\\2\\2\\u0111\\u00fc\\3\\2\\2\\2\\u0111\\u00ff\\3\\2\\2\\2\\u0111\\u0102\\3\\2\\2\\2\\u0111\"+\n- \"\\u0105\\3\\2\\2\\2\\u0111\\u0108\\3\\2\\2\\2\\u0111\\u010b\\3\\2\\2\\2\\u0112\\u0115\\3\\2\"+\n- \"\\2\\2\\u0113\\u0111\\3\\2\\2\\2\\u0113\\u0114\\3\\2\\2\\2\\u0114\\35\\3\\2\\2\\2\\u0115\\u0113\"+\n- \"\\3\\2\\2\\2\\u0116\\u011c\\5 \\21\\2\\u0117\\u011c\\5\\\"\\22\\2\\u0118\\u011c\\5*\\26\\2\"+\n- \"\\u0119\\u011c\\5.\\30\\2\\u011a\\u011c\\5\\60\\31\\2\\u011b\\u0116\\3\\2\\2\\2\\u011b\\u0117\"+\n- \"\\3\\2\\2\\2\\u011b\\u0118\\3\\2\\2\\2\\u011b\\u0119\\3\\2\\2\\2\\u011b\\u011a\\3\\2\\2\\2\\u011c\"+\n- 
\"\\37\\3\\2\\2\\2\\u011d\\u0123\\7\\t\\2\\2\\u011e\\u0124\\5 \\21\\2\\u011f\\u0124\\5\\\"\\22\"+\n- \"\\2\\u0120\\u0124\\5*\\26\\2\\u0121\\u0124\\5.\\30\\2\\u0122\\u0124\\5\\60\\31\\2\\u0123\"+\n- \"\\u011e\\3\\2\\2\\2\\u0123\\u011f\\3\\2\\2\\2\\u0123\\u0120\\3\\2\\2\\2\\u0123\\u0121\\3\\2\"+\n- \"\\2\\2\\u0123\\u0122\\3\\2\\2\\2\\u0124\\u0125\\3\\2\\2\\2\\u0125\\u0128\\7\\n\\2\\2\\u0126\"+\n- \"\\u0129\\5&\\24\\2\\u0127\\u0129\\5$\\23\\2\\u0128\\u0126\\3\\2\\2\\2\\u0128\\u0127\\3\\2\"+\n- \"\\2\\2\\u0128\\u0129\\3\\2\\2\\2\\u0129!\\3\\2\\2\\2\\u012a\\u012b\\7\\t\\2\\2\\u012b\\u012c\"+\n- \"\\5\\22\\n\\2\\u012c\\u0132\\7\\n\\2\\2\\u012d\\u0133\\5 \\21\\2\\u012e\\u0133\\5\\\"\\22\\2\"+\n- \"\\u012f\\u0133\\5*\\26\\2\\u0130\\u0133\\5.\\30\\2\\u0131\\u0133\\5\\60\\31\\2\\u0132\\u012d\"+\n- \"\\3\\2\\2\\2\\u0132\\u012e\\3\\2\\2\\2\\u0132\\u012f\\3\\2\\2\\2\\u0132\\u0130\\3\\2\\2\\2\\u0132\"+\n- \"\\u0131\\3\\2\\2\\2\\u0133#\\3\\2\\2\\2\\u0134\\u0135\\7\\7\\2\\2\\u0135\\u0136\\5\\34\\17\"+\n- \"\\2\\u0136\\u0139\\7\\b\\2\\2\\u0137\\u013a\\5&\\24\\2\\u0138\\u013a\\5$\\23\\2\\u0139\\u0137\"+\n- \"\\3\\2\\2\\2\\u0139\\u0138\\3\\2\\2\\2\\u0139\\u013a\\3\\2\\2\\2\\u013a%\\3\\2\\2\\2\\u013b\"+\n- \"\\u013e\\7\\13\\2\\2\\u013c\\u013f\\5(\\25\\2\\u013d\\u013f\\5,\\27\\2\\u013e\\u013c\\3\"+\n- \"\\2\\2\\2\\u013e\\u013d\\3\\2\\2\\2\\u013f\\'\\3\\2\\2\\2\\u0140\\u0141\\7K\\2\\2\\u0141\\u0144\"+\n- \"\\5\\62\\32\\2\\u0142\\u0145\\5&\\24\\2\\u0143\\u0145\\5$\\23\\2\\u0144\\u0142\\3\\2\\2\\2\"+\n- \"\\u0144\\u0143\\3\\2\\2\\2\\u0144\\u0145\\3\\2\\2\\2\\u0145)\\3\\2\\2\\2\\u0146\\u0149\\5\"+\n- \"\\30\\r\\2\\u0147\\u014a\\5&\\24\\2\\u0148\\u014a\\5$\\23\\2\\u0149\\u0147\\3\\2\\2\\2\\u0149\"+\n- \"\\u0148\\3\\2\\2\\2\\u0149\\u014a\\3\\2\\2\\2\\u014a+\\3\\2\\2\\2\\u014b\\u014e\\t\\13\\2\\2\"+\n- \"\\u014c\\u014f\\5&\\24\\2\\u014d\\u014f\\5$\\23\\2\\u014e\\u014c\\3\\2\\2\\2\\u014e\\u014d\"+\n- \"\\3\\2\\2\\2\\u014e\\u014f\\3\\2\\2\\2\\u014f-\\3\\2\\2\\2\\u0150\\u0151\\7\\26\\2\\2\\u0151\"+\n- \"\\u0161\\5\\30\\r\\2\\u0152\\u0154\\5\\62\\32\\2\\u0153\\u0155\\5&\\24\\2\\u0154\\u0153\"+\n- \"\\3\\2\\2\\2\\u0154\\u0155\\3\\2\\2\\2\\u0155\\u0162\\3\\2\\2\\2\\u0156\\u0157\\7\\7\\2\\2\\u0157\"+\n- \"\\u0158\\5\\34\\17\\2\\u0158\\u0159\\7\\b\\2\\2\\u0159\\u015b\\3\\2\\2\\2\\u015a\\u0156\\3\"+\n- \"\\2\\2\\2\\u015b\\u015c\\3\\2\\2\\2\\u015c\\u015a\\3\\2\\2\\2\\u015c\\u015d\\3\\2\\2\\2\\u015d\"+\n- \"\\u015f\\3\\2\\2\\2\\u015e\\u0160\\5&\\24\\2\\u015f\\u015e\\3\\2\\2\\2\\u015f\\u0160\\3\\2\"+\n- \"\\2\\2\\u0160\\u0162\\3\\2\\2\\2\\u0161\\u0152\\3\\2\\2\\2\\u0161\\u015a\\3\\2\\2\\2\\u0162\"+\n- \"/\\3\\2\\2\\2\\u0163\\u0166\\7E\\2\\2\\u0164\\u0167\\5&\\24\\2\\u0165\\u0167\\5$\\23\\2\\u0166\"+\n- \"\\u0164\\3\\2\\2\\2\\u0166\\u0165\\3\\2\\2\\2\\u0166\\u0167\\3\\2\\2\\2\\u0167\\61\\3\\2\\2\"+\n- \"\\2\\u0168\\u0171\\7\\t\\2\\2\\u0169\\u016e\\5\\34\\17\\2\\u016a\\u016b\\7\\f\\2\\2\\u016b\"+\n- \"\\u016d\\5\\34\\17\\2\\u016c\\u016a\\3\\2\\2\\2\\u016d\\u0170\\3\\2\\2\\2\\u016e\\u016c\\3\"+\n- \"\\2\\2\\2\\u016e\\u016f\\3\\2\\2\\2\\u016f\\u0172\\3\\2\\2\\2\\u0170\\u016e\\3\\2\\2\\2\\u0171\"+\n- \"\\u0169\\3\\2\\2\\2\\u0171\\u0172\\3\\2\\2\\2\\u0172\\u0173\\3\\2\\2\\2\\u0173\\u0174\\7\\n\"+\n- \"\\2\\2\\u0174\\63\\3\\2\\2\\2/\\67BJSX\\\\`eimqv}\\u0082\\u0086\\u0088\\u008e\\u0093\\u0097\"+\n- \"\\u009e\\u00a8\\u00b0\\u00b6\\u00c0\\u00c4\\u00cc\\u00eb\\u0111\\u0113\\u011b\\u0123\"+\n- \"\\u0128\\u0132\\u0139\\u013e\\u0144\\u0149\\u014e\\u0154\\u015c\\u015f\\u0161\\u0166\"+\n- \"\\u016e\\u0171\";\n+ 
\"\\3\\17\\3\\17\\7\\17\\u0109\\n\\17\\f\\17\\16\\17\\u010c\\13\\17\\3\\20\\3\\20\\3\\20\\3\\20\"+\n+ \"\\3\\20\\5\\20\\u0113\\n\\20\\3\\21\\3\\21\\3\\21\\3\\21\\3\\21\\3\\21\\5\\21\\u011b\\n\\21\\3\"+\n+ \"\\21\\3\\21\\3\\21\\5\\21\\u0120\\n\\21\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\"+\n+ \"\\5\\22\\u012a\\n\\22\\3\\23\\3\\23\\3\\23\\3\\23\\3\\23\\5\\23\\u0131\\n\\23\\3\\24\\3\\24\\3\"+\n+ \"\\24\\5\\24\\u0136\\n\\24\\3\\25\\3\\25\\3\\25\\3\\25\\5\\25\\u013c\\n\\25\\3\\26\\3\\26\\3\\26\"+\n+ \"\\5\\26\\u0141\\n\\26\\3\\27\\3\\27\\3\\27\\5\\27\\u0146\\n\\27\\3\\30\\3\\30\\3\\30\\3\\30\\5\"+\n+ \"\\30\\u014c\\n\\30\\3\\30\\3\\30\\3\\30\\3\\30\\6\\30\\u0152\\n\\30\\r\\30\\16\\30\\u0153\\3\"+\n+ \"\\30\\5\\30\\u0157\\n\\30\\5\\30\\u0159\\n\\30\\3\\31\\3\\31\\3\\31\\5\\31\\u015e\\n\\31\\3\\32\"+\n+ \"\\3\\32\\3\\32\\3\\32\\7\\32\\u0164\\n\\32\\f\\32\\16\\32\\u0167\\13\\32\\5\\32\\u0169\\n\\32\"+\n+ \"\\3\\32\\3\\32\\3\\32\\2\\3\\34\\33\\2\\4\\6\\b\\n\\f\\16\\20\\22\\24\\26\\30\\32\\34\\36 \\\"$&\"+\n+ \"(*,.\\60\\62\\2\\r\\3\\3\\r\\r\\4\\2\\32\\33\\37 \\3\\2\\65@\\3\\2AD\\3\\2\\63\\64\\3\\2\\34\\36\"+\n+ \"\\3\\2\\37 \\3\\2!#\\3\\2$\\'\\3\\2(+\\3\\2JK\\u01a3\\2\\65\\3\\2\\2\\2\\4\\177\\3\\2\\2\\2\\6\\u008a\"+\n+ \"\\3\\2\\2\\2\\b\\u008e\\3\\2\\2\\2\\n\\u0090\\3\\2\\2\\2\\f\\u0095\\3\\2\\2\\2\\16\\u0097\\3\\2\"+\n+ \"\\2\\2\\20\\u0099\\3\\2\\2\\2\\22\\u00a2\\3\\2\\2\\2\\24\\u00aa\\3\\2\\2\\2\\26\\u00af\\3\\2\\2\"+\n+ \"\\2\\30\\u00b9\\3\\2\\2\\2\\32\\u00bd\\3\\2\\2\\2\\34\\u00e2\\3\\2\\2\\2\\36\\u0112\\3\\2\\2\\2\"+\n+ \" \\u0114\\3\\2\\2\\2\\\"\\u0121\\3\\2\\2\\2$\\u012b\\3\\2\\2\\2&\\u0132\\3\\2\\2\\2(\\u0137\\3\"+\n+ \"\\2\\2\\2*\\u013d\\3\\2\\2\\2,\\u0142\\3\\2\\2\\2.\\u0147\\3\\2\\2\\2\\60\\u015a\\3\\2\\2\\2\\62\"+\n+ \"\\u015f\\3\\2\\2\\2\\64\\66\\5\\4\\3\\2\\65\\64\\3\\2\\2\\2\\66\\67\\3\\2\\2\\2\\67\\65\\3\\2\\2\\2\"+\n+ \"\\678\\3\\2\\2\\289\\3\\2\\2\\29:\\7\\2\\2\\3:\\3\\3\\2\\2\\2;<\\7\\16\\2\\2<=\\7\\t\\2\\2=>\\5\\34\"+\n+ \"\\17\\2>?\\7\\n\\2\\2?B\\5\\6\\4\\2@A\\7\\17\\2\\2AC\\5\\6\\4\\2B@\\3\\2\\2\\2BC\\3\\2\\2\\2C\\u0080\"+\n+ \"\\3\\2\\2\\2DE\\7\\20\\2\\2EF\\7\\t\\2\\2FG\\5\\34\\17\\2GJ\\7\\n\\2\\2HK\\5\\6\\4\\2IK\\5\\b\\5\"+\n+ \"\\2JH\\3\\2\\2\\2JI\\3\\2\\2\\2K\\u0080\\3\\2\\2\\2LM\\7\\21\\2\\2MN\\5\\6\\4\\2NO\\7\\20\\2\\2\"+\n+ \"OP\\7\\t\\2\\2PQ\\5\\34\\17\\2QR\\7\\n\\2\\2RS\\t\\2\\2\\2S\\u0080\\3\\2\\2\\2TU\\7\\22\\2\\2U\"+\n+ \"W\\7\\t\\2\\2VX\\5\\f\\7\\2WV\\3\\2\\2\\2WX\\3\\2\\2\\2XY\\3\\2\\2\\2Y[\\7\\r\\2\\2Z\\\\\\5\\34\\17\"+\n+ \"\\2[Z\\3\\2\\2\\2[\\\\\\3\\2\\2\\2\\\\]\\3\\2\\2\\2]_\\7\\r\\2\\2^`\\5\\16\\b\\2_^\\3\\2\\2\\2_`\\3\"+\n+ \"\\2\\2\\2`a\\3\\2\\2\\2ad\\7\\n\\2\\2be\\5\\6\\4\\2ce\\5\\b\\5\\2db\\3\\2\\2\\2dc\\3\\2\\2\\2e\\u0080\"+\n+ \"\\3\\2\\2\\2fg\\5\\20\\t\\2gh\\t\\2\\2\\2h\\u0080\\3\\2\\2\\2ij\\7\\23\\2\\2j\\u0080\\t\\2\\2\\2\"+\n+ \"kl\\7\\24\\2\\2l\\u0080\\t\\2\\2\\2mn\\7\\25\\2\\2no\\5\\34\\17\\2op\\t\\2\\2\\2p\\u0080\\3\\2\"+\n+ \"\\2\\2qr\\7\\27\\2\\2rt\\5\\6\\4\\2su\\5\\26\\f\\2ts\\3\\2\\2\\2uv\\3\\2\\2\\2vt\\3\\2\\2\\2vw\\3\"+\n+ \"\\2\\2\\2w\\u0080\\3\\2\\2\\2xy\\7\\31\\2\\2yz\\5\\34\\17\\2z{\\t\\2\\2\\2{\\u0080\\3\\2\\2\\2\"+\n+ \"|}\\5\\34\\17\\2}~\\t\\2\\2\\2~\\u0080\\3\\2\\2\\2\\177;\\3\\2\\2\\2\\177D\\3\\2\\2\\2\\177L\\3\"+\n+ \"\\2\\2\\2\\177T\\3\\2\\2\\2\\177f\\3\\2\\2\\2\\177i\\3\\2\\2\\2\\177k\\3\\2\\2\\2\\177m\\3\\2\\2\"+\n+ \"\\2\\177q\\3\\2\\2\\2\\177x\\3\\2\\2\\2\\177|\\3\\2\\2\\2\\u0080\\5\\3\\2\\2\\2\\u0081\\u0083\"+\n+ 
\"\\7\\5\\2\\2\\u0082\\u0084\\5\\4\\3\\2\\u0083\\u0082\\3\\2\\2\\2\\u0084\\u0085\\3\\2\\2\\2\\u0085\"+\n+ \"\\u0083\\3\\2\\2\\2\\u0085\\u0086\\3\\2\\2\\2\\u0086\\u0087\\3\\2\\2\\2\\u0087\\u0088\\7\\6\"+\n+ \"\\2\\2\\u0088\\u008b\\3\\2\\2\\2\\u0089\\u008b\\5\\4\\3\\2\\u008a\\u0081\\3\\2\\2\\2\\u008a\"+\n+ \"\\u0089\\3\\2\\2\\2\\u008b\\7\\3\\2\\2\\2\\u008c\\u008f\\5\\n\\6\\2\\u008d\\u008f\\7\\r\\2\\2\"+\n+ \"\\u008e\\u008c\\3\\2\\2\\2\\u008e\\u008d\\3\\2\\2\\2\\u008f\\t\\3\\2\\2\\2\\u0090\\u0091\\7\"+\n+ \"\\5\\2\\2\\u0091\\u0092\\7\\6\\2\\2\\u0092\\13\\3\\2\\2\\2\\u0093\\u0096\\5\\20\\t\\2\\u0094\"+\n+ \"\\u0096\\5\\34\\17\\2\\u0095\\u0093\\3\\2\\2\\2\\u0095\\u0094\\3\\2\\2\\2\\u0096\\r\\3\\2\\2\"+\n+ \"\\2\\u0097\\u0098\\5\\34\\17\\2\\u0098\\17\\3\\2\\2\\2\\u0099\\u009a\\5\\22\\n\\2\\u009a\\u009f\"+\n+ \"\\5\\24\\13\\2\\u009b\\u009c\\7\\f\\2\\2\\u009c\\u009e\\5\\24\\13\\2\\u009d\\u009b\\3\\2\\2\"+\n+ \"\\2\\u009e\\u00a1\\3\\2\\2\\2\\u009f\\u009d\\3\\2\\2\\2\\u009f\\u00a0\\3\\2\\2\\2\\u00a0\\21\"+\n+ \"\\3\\2\\2\\2\\u00a1\\u009f\\3\\2\\2\\2\\u00a2\\u00a7\\5\\30\\r\\2\\u00a3\\u00a4\\7\\7\\2\\2\"+\n+ \"\\u00a4\\u00a6\\7\\b\\2\\2\\u00a5\\u00a3\\3\\2\\2\\2\\u00a6\\u00a9\\3\\2\\2\\2\\u00a7\\u00a5\"+\n+ \"\\3\\2\\2\\2\\u00a7\\u00a8\\3\\2\\2\\2\\u00a8\\23\\3\\2\\2\\2\\u00a9\\u00a7\\3\\2\\2\\2\\u00aa\"+\n+ \"\\u00ad\\5\\30\\r\\2\\u00ab\\u00ac\\7\\65\\2\\2\\u00ac\\u00ae\\5\\34\\17\\2\\u00ad\\u00ab\"+\n+ \"\\3\\2\\2\\2\\u00ad\\u00ae\\3\\2\\2\\2\\u00ae\\25\\3\\2\\2\\2\\u00af\\u00b0\\7\\30\\2\\2\\u00b0\"+\n+ \"\\u00b1\\7\\t\\2\\2\\u00b1\\u00b2\\5\\30\\r\\2\\u00b2\\u00b3\\5\\30\\r\\2\\u00b3\\u00b4\\3\"+\n+ \"\\2\\2\\2\\u00b4\\u00b7\\7\\n\\2\\2\\u00b5\\u00b8\\5\\6\\4\\2\\u00b6\\u00b8\\5\\n\\6\\2\\u00b7\"+\n+ \"\\u00b5\\3\\2\\2\\2\\u00b7\\u00b6\\3\\2\\2\\2\\u00b8\\27\\3\\2\\2\\2\\u00b9\\u00bb\\7I\\2\\2\"+\n+ \"\\u00ba\\u00bc\\5\\32\\16\\2\\u00bb\\u00ba\\3\\2\\2\\2\\u00bb\\u00bc\\3\\2\\2\\2\\u00bc\\31\"+\n+ \"\\3\\2\\2\\2\\u00bd\\u00be\\7$\\2\\2\\u00be\\u00c3\\5\\30\\r\\2\\u00bf\\u00c0\\7\\f\\2\\2\\u00c0\"+\n+ \"\\u00c2\\5\\30\\r\\2\\u00c1\\u00bf\\3\\2\\2\\2\\u00c2\\u00c5\\3\\2\\2\\2\\u00c3\\u00c1\\3\"+\n+ \"\\2\\2\\2\\u00c3\\u00c4\\3\\2\\2\\2\\u00c4\\u00c6\\3\\2\\2\\2\\u00c5\\u00c3\\3\\2\\2\\2\\u00c6\"+\n+ \"\\u00c7\\7&\\2\\2\\u00c7\\33\\3\\2\\2\\2\\u00c8\\u00c9\\b\\17\\1\\2\\u00c9\\u00ca\\t\\3\\2\"+\n+ \"\\2\\u00ca\\u00e3\\5\\34\\17\\20\\u00cb\\u00cc\\7\\t\\2\\2\\u00cc\\u00cd\\5\\22\\n\\2\\u00cd\"+\n+ \"\\u00ce\\7\\n\\2\\2\\u00ce\\u00cf\\5\\34\\17\\17\\u00cf\\u00e3\\3\\2\\2\\2\\u00d0\\u00d1\"+\n+ \"\\5\\36\\20\\2\\u00d1\\u00d2\\t\\4\\2\\2\\u00d2\\u00d3\\5\\34\\17\\3\\u00d3\\u00e3\\3\\2\\2\"+\n+ \"\\2\\u00d4\\u00d5\\7\\t\\2\\2\\u00d5\\u00d6\\5\\34\\17\\2\\u00d6\\u00d7\\7\\n\\2\\2\\u00d7\"+\n+ \"\\u00e3\\3\\2\\2\\2\\u00d8\\u00e3\\t\\5\\2\\2\\u00d9\\u00e3\\7F\\2\\2\\u00da\\u00e3\\7G\\2\"+\n+ \"\\2\\u00db\\u00e3\\7H\\2\\2\\u00dc\\u00dd\\5\\36\\20\\2\\u00dd\\u00de\\t\\6\\2\\2\\u00de\"+\n+ \"\\u00e3\\3\\2\\2\\2\\u00df\\u00e0\\t\\6\\2\\2\\u00e0\\u00e3\\5\\36\\20\\2\\u00e1\\u00e3\\5\"+\n+ \"\\36\\20\\2\\u00e2\\u00c8\\3\\2\\2\\2\\u00e2\\u00cb\\3\\2\\2\\2\\u00e2\\u00d0\\3\\2\\2\\2\\u00e2\"+\n+ \"\\u00d4\\3\\2\\2\\2\\u00e2\\u00d8\\3\\2\\2\\2\\u00e2\\u00d9\\3\\2\\2\\2\\u00e2\\u00da\\3\\2\"+\n+ \"\\2\\2\\u00e2\\u00db\\3\\2\\2\\2\\u00e2\\u00dc\\3\\2\\2\\2\\u00e2\\u00df\\3\\2\\2\\2\\u00e2\"+\n+ \"\\u00e1\\3\\2\\2\\2\\u00e3\\u010a\\3\\2\\2\\2\\u00e4\\u00e5\\f\\16\\2\\2\\u00e5\\u00e6\\t\"+\n+ \"\\7\\2\\2\\u00e6\\u0109\\5\\34\\17\\17\\u00e7\\u00e8\\f\\r\\2\\2\\u00e8\\u00e9\\t\\b\\2\\2\"+\n+ 
\"\\u00e9\\u0109\\5\\34\\17\\16\\u00ea\\u00eb\\f\\f\\2\\2\\u00eb\\u00ec\\t\\t\\2\\2\\u00ec\"+\n+ \"\\u0109\\5\\34\\17\\r\\u00ed\\u00ee\\f\\13\\2\\2\\u00ee\\u00ef\\t\\n\\2\\2\\u00ef\\u0109\"+\n+ \"\\5\\34\\17\\f\\u00f0\\u00f1\\f\\n\\2\\2\\u00f1\\u00f2\\t\\13\\2\\2\\u00f2\\u0109\\5\\34\\17\"+\n+ \"\\13\\u00f3\\u00f4\\f\\t\\2\\2\\u00f4\\u00f5\\7,\\2\\2\\u00f5\\u0109\\5\\34\\17\\n\\u00f6\"+\n+ \"\\u00f7\\f\\b\\2\\2\\u00f7\\u00f8\\7-\\2\\2\\u00f8\\u0109\\5\\34\\17\\t\\u00f9\\u00fa\\f\"+\n+ \"\\7\\2\\2\\u00fa\\u00fb\\7.\\2\\2\\u00fb\\u0109\\5\\34\\17\\b\\u00fc\\u00fd\\f\\6\\2\\2\\u00fd\"+\n+ \"\\u00fe\\7/\\2\\2\\u00fe\\u0109\\5\\34\\17\\7\\u00ff\\u0100\\f\\5\\2\\2\\u0100\\u0101\\7\"+\n+ \"\\60\\2\\2\\u0101\\u0109\\5\\34\\17\\6\\u0102\\u0103\\f\\4\\2\\2\\u0103\\u0104\\7\\61\\2\\2\"+\n+ \"\\u0104\\u0105\\5\\34\\17\\2\\u0105\\u0106\\7\\62\\2\\2\\u0106\\u0107\\5\\34\\17\\4\\u0107\"+\n+ \"\\u0109\\3\\2\\2\\2\\u0108\\u00e4\\3\\2\\2\\2\\u0108\\u00e7\\3\\2\\2\\2\\u0108\\u00ea\\3\\2\"+\n+ \"\\2\\2\\u0108\\u00ed\\3\\2\\2\\2\\u0108\\u00f0\\3\\2\\2\\2\\u0108\\u00f3\\3\\2\\2\\2\\u0108\"+\n+ \"\\u00f6\\3\\2\\2\\2\\u0108\\u00f9\\3\\2\\2\\2\\u0108\\u00fc\\3\\2\\2\\2\\u0108\\u00ff\\3\\2\"+\n+ \"\\2\\2\\u0108\\u0102\\3\\2\\2\\2\\u0109\\u010c\\3\\2\\2\\2\\u010a\\u0108\\3\\2\\2\\2\\u010a\"+\n+ \"\\u010b\\3\\2\\2\\2\\u010b\\35\\3\\2\\2\\2\\u010c\\u010a\\3\\2\\2\\2\\u010d\\u0113\\5 \\21\"+\n+ \"\\2\\u010e\\u0113\\5\\\"\\22\\2\\u010f\\u0113\\5*\\26\\2\\u0110\\u0113\\5.\\30\\2\\u0111\"+\n+ \"\\u0113\\5\\60\\31\\2\\u0112\\u010d\\3\\2\\2\\2\\u0112\\u010e\\3\\2\\2\\2\\u0112\\u010f\\3\"+\n+ \"\\2\\2\\2\\u0112\\u0110\\3\\2\\2\\2\\u0112\\u0111\\3\\2\\2\\2\\u0113\\37\\3\\2\\2\\2\\u0114\"+\n+ \"\\u011a\\7\\t\\2\\2\\u0115\\u011b\\5 \\21\\2\\u0116\\u011b\\5\\\"\\22\\2\\u0117\\u011b\\5\"+\n+ \"*\\26\\2\\u0118\\u011b\\5.\\30\\2\\u0119\\u011b\\5\\60\\31\\2\\u011a\\u0115\\3\\2\\2\\2\\u011a\"+\n+ \"\\u0116\\3\\2\\2\\2\\u011a\\u0117\\3\\2\\2\\2\\u011a\\u0118\\3\\2\\2\\2\\u011a\\u0119\\3\\2\"+\n+ \"\\2\\2\\u011b\\u011c\\3\\2\\2\\2\\u011c\\u011f\\7\\n\\2\\2\\u011d\\u0120\\5&\\24\\2\\u011e\"+\n+ \"\\u0120\\5$\\23\\2\\u011f\\u011d\\3\\2\\2\\2\\u011f\\u011e\\3\\2\\2\\2\\u011f\\u0120\\3\\2\"+\n+ \"\\2\\2\\u0120!\\3\\2\\2\\2\\u0121\\u0122\\7\\t\\2\\2\\u0122\\u0123\\5\\22\\n\\2\\u0123\\u0129\"+\n+ \"\\7\\n\\2\\2\\u0124\\u012a\\5 \\21\\2\\u0125\\u012a\\5\\\"\\22\\2\\u0126\\u012a\\5*\\26\\2\"+\n+ \"\\u0127\\u012a\\5.\\30\\2\\u0128\\u012a\\5\\60\\31\\2\\u0129\\u0124\\3\\2\\2\\2\\u0129\\u0125\"+\n+ \"\\3\\2\\2\\2\\u0129\\u0126\\3\\2\\2\\2\\u0129\\u0127\\3\\2\\2\\2\\u0129\\u0128\\3\\2\\2\\2\\u012a\"+\n+ \"#\\3\\2\\2\\2\\u012b\\u012c\\7\\7\\2\\2\\u012c\\u012d\\5\\34\\17\\2\\u012d\\u0130\\7\\b\\2\"+\n+ \"\\2\\u012e\\u0131\\5&\\24\\2\\u012f\\u0131\\5$\\23\\2\\u0130\\u012e\\3\\2\\2\\2\\u0130\\u012f\"+\n+ \"\\3\\2\\2\\2\\u0130\\u0131\\3\\2\\2\\2\\u0131%\\3\\2\\2\\2\\u0132\\u0135\\7\\13\\2\\2\\u0133\"+\n+ \"\\u0136\\5(\\25\\2\\u0134\\u0136\\5,\\27\\2\\u0135\\u0133\\3\\2\\2\\2\\u0135\\u0134\\3\\2\"+\n+ \"\\2\\2\\u0136\\'\\3\\2\\2\\2\\u0137\\u0138\\7K\\2\\2\\u0138\\u013b\\5\\62\\32\\2\\u0139\\u013c\"+\n+ \"\\5&\\24\\2\\u013a\\u013c\\5$\\23\\2\\u013b\\u0139\\3\\2\\2\\2\\u013b\\u013a\\3\\2\\2\\2\\u013b\"+\n+ \"\\u013c\\3\\2\\2\\2\\u013c)\\3\\2\\2\\2\\u013d\\u0140\\5\\30\\r\\2\\u013e\\u0141\\5&\\24\\2\"+\n+ \"\\u013f\\u0141\\5$\\23\\2\\u0140\\u013e\\3\\2\\2\\2\\u0140\\u013f\\3\\2\\2\\2\\u0140\\u0141\"+\n+ \"\\3\\2\\2\\2\\u0141+\\3\\2\\2\\2\\u0142\\u0145\\t\\f\\2\\2\\u0143\\u0146\\5&\\24\\2\\u0144\"+\n+ 
\"\\u0146\\5$\\23\\2\\u0145\\u0143\\3\\2\\2\\2\\u0145\\u0144\\3\\2\\2\\2\\u0145\\u0146\\3\\2\"+\n+ \"\\2\\2\\u0146-\\3\\2\\2\\2\\u0147\\u0148\\7\\26\\2\\2\\u0148\\u0158\\5\\30\\r\\2\\u0149\\u014b\"+\n+ \"\\5\\62\\32\\2\\u014a\\u014c\\5&\\24\\2\\u014b\\u014a\\3\\2\\2\\2\\u014b\\u014c\\3\\2\\2\\2\"+\n+ \"\\u014c\\u0159\\3\\2\\2\\2\\u014d\\u014e\\7\\7\\2\\2\\u014e\\u014f\\5\\34\\17\\2\\u014f\\u0150\"+\n+ \"\\7\\b\\2\\2\\u0150\\u0152\\3\\2\\2\\2\\u0151\\u014d\\3\\2\\2\\2\\u0152\\u0153\\3\\2\\2\\2\\u0153\"+\n+ \"\\u0151\\3\\2\\2\\2\\u0153\\u0154\\3\\2\\2\\2\\u0154\\u0156\\3\\2\\2\\2\\u0155\\u0157\\5&\"+\n+ \"\\24\\2\\u0156\\u0155\\3\\2\\2\\2\\u0156\\u0157\\3\\2\\2\\2\\u0157\\u0159\\3\\2\\2\\2\\u0158\"+\n+ \"\\u0149\\3\\2\\2\\2\\u0158\\u0151\\3\\2\\2\\2\\u0159/\\3\\2\\2\\2\\u015a\\u015d\\7E\\2\\2\\u015b\"+\n+ \"\\u015e\\5&\\24\\2\\u015c\\u015e\\5$\\23\\2\\u015d\\u015b\\3\\2\\2\\2\\u015d\\u015c\\3\\2\"+\n+ \"\\2\\2\\u015d\\u015e\\3\\2\\2\\2\\u015e\\61\\3\\2\\2\\2\\u015f\\u0168\\7\\t\\2\\2\\u0160\\u0165\"+\n+ \"\\5\\34\\17\\2\\u0161\\u0162\\7\\f\\2\\2\\u0162\\u0164\\5\\34\\17\\2\\u0163\\u0161\\3\\2\\2\"+\n+ \"\\2\\u0164\\u0167\\3\\2\\2\\2\\u0165\\u0163\\3\\2\\2\\2\\u0165\\u0166\\3\\2\\2\\2\\u0166\\u0169\"+\n+ \"\\3\\2\\2\\2\\u0167\\u0165\\3\\2\\2\\2\\u0168\\u0160\\3\\2\\2\\2\\u0168\\u0169\\3\\2\\2\\2\\u0169\"+\n+ \"\\u016a\\3\\2\\2\\2\\u016a\\u016b\\7\\n\\2\\2\\u016b\\63\\3\\2\\2\\2(\\67BJW[_dv\\177\\u0085\"+\n+ \"\\u008a\\u008e\\u0095\\u009f\\u00a7\\u00ad\\u00b7\\u00bb\\u00c3\\u00e2\\u0108\\u010a\"+\n+ \"\\u0112\\u011a\\u011f\\u0129\\u0130\\u0135\\u013b\\u0140\\u0145\\u014b\\u0153\\u0156\"+\n+ \"\\u0158\\u015d\\u0165\\u0168\";\n public static final ATN _ATN =\n new ATNDeserializer().deserialize(_serializedATN.toCharArray());\n static {", "filename": "modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java", "status": "modified" }, { "diff": "", "filename": "modules/lang-painless/src/test/java/org/elasticsearch/painless/DefOperationTests.java", "status": "modified" }, { "diff": "@@ -19,161 +19,60 @@\n \n package org.elasticsearch.painless;\n \n+import java.util.Collections;\n import java.util.HashMap;\n import java.util.Map;\n \n public class NoSemiColonTests extends ScriptTestCase {\n \n- public void testIfStatement() {\n- assertEquals(1, exec(\"int x = 5 if (x == 5) return 1 return 0\"));\n- assertEquals(0, exec(\"int x = 4 if (x == 5) return 1 else return 0\"));\n- assertEquals(2, exec(\"int x = 4 if (x == 5) return 1 else if (x == 4) return 2 else return 0\"));\n- assertEquals(1, exec(\"int x = 4 if (x == 5) return 1 else if (x == 4) return 1 else return 0\"));\n-\n- assertEquals(3, exec(\n- \"int x = 5\\n\" +\n- \"if (x == 5) {\\n\" +\n- \" int y = 2\\n\" +\n- \" \\n\" +\n- \" if (y == 2) {\\n\" +\n- \" x = 3\\n\" +\n- \" }\\n\" +\n- \" \\n\" +\n- \"}\\n\" +\n- \"\\n\" +\n- \"return x\\n\"));\n- }\n-\n- public void testWhileStatement() {\n-\n- assertEquals(\"aaaaaa\", exec(\"String c = \\\"a\\\" int x while (x < 5) { ++x c += \\\"a\\\" } return c\"));\n-\n- Object value = exec(\n- \" byte[][] b = new byte[5][5] \\n\" +\n- \" byte x = 0, y \\n\" +\n- \" \\n\" +\n- \" while (x < 5) { \\n\" +\n- \" y = 0 \\n\" +\n- \" \\n\" +\n- \" while (y < 5) { \\n\" +\n- \" b[x][y] = (byte)(x*y) \\n\" +\n- \" ++y \\n\" +\n- \" } \\n\" +\n- \" \\n\" +\n- \" ++x \\n\" +\n- \" } \\n\" +\n- \" \\n\" +\n- \" return b \\n\");\n-\n- byte[][] b = (byte[][])value;\n-\n- for (byte x = 0; x < 5; ++x) {\n- for (byte y = 0; y < 5; ++y) {\n- assertEquals(x*y, b[x][y]);\n- }\n- }\n- }\n-\n- 
public void testDoWhileStatement() {\n- assertEquals(\"aaaaaa\", exec(\"String c = \\\"a\\\" int x do { c += \\\"a\\\"; ++x } while (x < 5) return c\"));\n-\n- Object value = exec(\n- \" long[][] l = new long[5][5] \\n\" +\n- \" long x = 0, y \\n\" +\n- \" \\n\" +\n- \" do { \\n\" +\n- \" y = 0 \\n\" +\n- \" \\n\" +\n- \" do { \\n\" +\n- \" l[(int)x][(int)y] = x*y; \\n\" +\n- \" ++y \\n\" +\n- \" } while (y < 5) \\n\" +\n- \" \\n\" +\n- \" ++x \\n\" +\n- \" } while (x < 5) \\n\" +\n- \" \\n\" +\n- \" return l \\n\");\n-\n- long[][] l = (long[][])value;\n-\n- for (long x = 0; x < 5; ++x) {\n- for (long y = 0; y < 5; ++y) {\n- assertEquals(x*y, l[(int)x][(int)y]);\n- }\n- }\n- }\n-\n- public void testForStatement() {\n- assertEquals(\"aaaaaa\", exec(\"String c = \\\"a\\\" for (int x = 0; x < 5; ++x) c += \\\"a\\\" return c\"));\n-\n- Object value = exec(\n- \" int[][] i = new int[5][5] \\n\" +\n- \" for (int x = 0; x < 5; ++x) { \\n\" +\n- \" for (int y = 0; y < 5; ++y) { \\n\" +\n- \" i[x][y] = x*y \\n\" +\n- \" } \\n\" +\n- \" } \\n\" +\n- \" \\n\" +\n- \" return i \\n\");\n-\n- int[][] i = (int[][])value;\n-\n- for (int x = 0; x < 5; ++x) {\n- for (int y = 0; y < 5; ++y) {\n- assertEquals(x*y, i[x][y]);\n- }\n- }\n- }\n-\n public void testDeclarationStatement() {\n- assertEquals((byte)2, exec(\"byte a = 2 return a\"));\n- assertEquals((short)2, exec(\"short a = 2 return a\"));\n- assertEquals((char)2, exec(\"char a = 2 return a\"));\n- assertEquals(2, exec(\"int a = 2 return a\"));\n- assertEquals(2L, exec(\"long a = 2 return a\"));\n- assertEquals(2F, exec(\"float a = 2 return a\"));\n- assertEquals(2.0, exec(\"double a = 2 return a\"));\n- assertEquals(false, exec(\"boolean a = false return a\"));\n- assertEquals(\"string\", exec(\"String a = \\\"string\\\" return a\"));\n- assertEquals(HashMap.class, exec(\"Map<String, Object> a = new HashMap<String, Object>() return a\").getClass());\n-\n- assertEquals(byte[].class, exec(\"byte[] a = new byte[1] return a\").getClass());\n- assertEquals(short[].class, exec(\"short[] a = new short[1] return a\").getClass());\n- assertEquals(char[].class, exec(\"char[] a = new char[1] return a\").getClass());\n- assertEquals(int[].class, exec(\"int[] a = new int[1] return a\").getClass());\n- assertEquals(long[].class, exec(\"long[] a = new long[1] return a\").getClass());\n- assertEquals(float[].class, exec(\"float[] a = new float[1] return a\").getClass());\n- assertEquals(double[].class, exec(\"double[] a = new double[1] return a\").getClass());\n- assertEquals(boolean[].class, exec(\"boolean[] a = new boolean[1] return a\").getClass());\n- assertEquals(String[].class, exec(\"String[] a = new String[1] return a\").getClass());\n- assertEquals(Map[].class, exec(\"Map<String,Object>[] a = new Map<String,Object>[1] return a\").getClass());\n-\n- assertEquals(byte[][].class, exec(\"byte[][] a = new byte[1][2] return a\").getClass());\n- assertEquals(short[][][].class, exec(\"short[][][] a = new short[1][2][3] return a\").getClass());\n- assertEquals(char[][][][].class, exec(\"char[][][][] a = new char[1][2][3][4] return a\").getClass());\n- assertEquals(int[][][][][].class, exec(\"int[][][][][] a = new int[1][2][3][4][5] return a\").getClass());\n- assertEquals(long[][].class, exec(\"long[][] a = new long[1][2] return a\").getClass());\n- assertEquals(float[][][].class, exec(\"float[][][] a = new float[1][2][3] return a\").getClass());\n- assertEquals(double[][][][].class, exec(\"double[][][][] a = new double[1][2][3][4] return a\").getClass());\n- 
assertEquals(boolean[][][][][].class, exec(\"boolean[][][][][] a = new boolean[1][2][3][4][5] return a\").getClass());\n- assertEquals(String[][].class, exec(\"String[][] a = new String[1][2] return a\").getClass());\n- assertEquals(Map[][][].class, exec(\"Map<String,Object>[][][] a = new Map<String,Object>[1][2][3] return a\").getClass());\n+ assertEquals((byte)2, exec(\"byte a = 2; return a\"));\n+ assertEquals((short)2, exec(\"short a = 2; return a\"));\n+ assertEquals((char)2, exec(\"char a = 2; return a\"));\n+ assertEquals(2, exec(\"int a = 2; return a\"));\n+ assertEquals(2L, exec(\"long a = 2; return a\"));\n+ assertEquals(2F, exec(\"float a = 2; return a\"));\n+ assertEquals(2.0, exec(\"double a = 2; return a\"));\n+ assertEquals(false, exec(\"boolean a = false; return a\"));\n+ assertEquals(\"string\", exec(\"String a = \\\"string\\\"; return a\"));\n+ assertEquals(HashMap.class, exec(\"Map<String, Object> a = new HashMap<String, Object>(); return a\").getClass());\n+\n+ assertEquals(byte[].class, exec(\"byte[] a = new byte[1]; return a\").getClass());\n+ assertEquals(short[].class, exec(\"short[] a = new short[1]; return a\").getClass());\n+ assertEquals(char[].class, exec(\"char[] a = new char[1]; return a\").getClass());\n+ assertEquals(int[].class, exec(\"int[] a = new int[1]; return a\").getClass());\n+ assertEquals(long[].class, exec(\"long[] a = new long[1]; return a\").getClass());\n+ assertEquals(float[].class, exec(\"float[] a = new float[1]; return a\").getClass());\n+ assertEquals(double[].class, exec(\"double[] a = new double[1]; return a\").getClass());\n+ assertEquals(boolean[].class, exec(\"boolean[] a = new boolean[1]; return a\").getClass());\n+ assertEquals(String[].class, exec(\"String[] a = new String[1]; return a\").getClass());\n+ assertEquals(Map[].class, exec(\"Map<String,Object>[] a = new Map<String,Object>[1]; return a\").getClass());\n+\n+ assertEquals(byte[][].class, exec(\"byte[][] a = new byte[1][2]; return a\").getClass());\n+ assertEquals(short[][][].class, exec(\"short[][][] a = new short[1][2][3]; return a\").getClass());\n+ assertEquals(char[][][][].class, exec(\"char[][][][] a = new char[1][2][3][4]; return a\").getClass());\n+ assertEquals(int[][][][][].class, exec(\"int[][][][][] a = new int[1][2][3][4][5]; return a\").getClass());\n+ assertEquals(long[][].class, exec(\"long[][] a = new long[1][2]; return a\").getClass());\n+ assertEquals(float[][][].class, exec(\"float[][][] a = new float[1][2][3]; return a\").getClass());\n+ assertEquals(double[][][][].class, exec(\"double[][][][] a = new double[1][2][3][4]; return a\").getClass());\n+ assertEquals(boolean[][][][][].class, exec(\"boolean[][][][][] a = new boolean[1][2][3][4][5]; return a\").getClass());\n+ assertEquals(String[][].class, exec(\"String[][] a = new String[1][2]; return a\").getClass());\n+ assertEquals(Map[][][].class, exec(\"Map<String,Object>[][][] a = new Map<String,Object>[1][2][3]; return a\").getClass());\n }\n-\n- public void testContinueStatement() {\n- assertEquals(9, exec(\"int x = 0, y = 0 while (x < 10) { ++x if (x == 1) continue ++y } return y\"));\n- }\n-\n- public void testBreakStatement() {\n- assertEquals(4, exec(\"int x = 0, y = 0 while (x < 10) { ++x if (x == 5) break ++y } return y\"));\n+ \n+ public void testExpression() {\n+ assertEquals(10, exec(\"10\"));\n+ assertEquals(10, exec(\"5 + 5\"));\n+ assertEquals(10, exec(\"5 + 5\"));\n+ assertEquals(10, exec(\"params.param == 'yes' ? 
10 : 5\", Collections.singletonMap(\"param\", \"yes\")));\n }\n \n @SuppressWarnings(\"rawtypes\")\n public void testReturnStatement() {\n assertEquals(10, exec(\"return 10\"));\n- assertEquals(5, exec(\"int x = 5 return x\"));\n- assertEquals(4, exec(\"int[] x = new int[2] x[1] = 4 return x[1]\"));\n- assertEquals(5, ((short[])exec(\"short[] s = new short[3] s[1] = 5 return s\"))[1]);\n- assertEquals(10, ((Map)exec(\"Map<String,Object> s = new HashMap< String,Object>() s.put(\\\"x\\\", 10) return s\")).get(\"x\"));\n+ assertEquals(5, exec(\"int x = 5; return x\"));\n+ assertEquals(4, exec(\"int[] x = new int[2]; x[1] = 4; return x[1]\"));\n+ assertEquals(5, ((short[])exec(\"short[] s = new short[3]; s[1] = 5; return s\"))[1]);\n+ assertEquals(10, ((Map)exec(\"Map<String,Object> s = new HashMap< String,Object>(); s.put(\\\"x\\\", 10); return s\")).get(\"x\"));\n }\n }", "filename": "modules/lang-painless/src/test/java/org/elasticsearch/painless/NoSemiColonTests.java", "status": "modified" }, { "diff": "@@ -19,6 +19,8 @@\n \n package org.elasticsearch.painless;\n \n+import java.util.Locale;\n+\n public class StringTests extends ScriptTestCase {\n \n public void testAppend() {\n@@ -63,6 +65,21 @@ public void testAppend() {\n assertEquals(\"cat\" + \"cat\", exec(\"String s = 'cat'; return s + s;\"));\n }\n \n+ public void testAppendMultiple() {\n+ assertEquals(\"cat\" + true + \"abc\" + null, exec(\"String s = \\\"cat\\\"; return s + true + 'abc' + null;\"));\n+ }\n+\n+ public void testAppendMany() {\n+ StringBuilder script = new StringBuilder(\"String s = \\\"cat\\\"; return s\");\n+ StringBuilder result = new StringBuilder(\"cat\");\n+ for (int i = 0; i < 200 /* indy limit */ + 10; i++) {\n+ final String s = String.format(Locale.ROOT, \"%03d\", i);\n+ script.append(\" + '\").append(s).append(\"'.toString()\");\n+ result.append(s);\n+ }\n+ assertEquals(result.toString(), exec(script.toString()));\n+ }\n+\n public void testStringAPI() {\n assertEquals(\"\", exec(\"return new String();\"));\n assertEquals('x', exec(\"String s = \\\"x\\\"; return s.charAt(0);\"));\n@@ -127,8 +144,8 @@ public void testStringAndCharacter() {\n assertEquals(\"c\", exec(\"return (String)(char)\\\"c\\\"\"));\n assertEquals(\"c\", exec(\"return (String)(char)'c'\"));\n \n- assertEquals('c', exec(\"String s = \\\"c\\\" (char)s\"));\n- assertEquals('c', exec(\"String s = 'c' (char)s\"));\n+ assertEquals('c', exec(\"String s = \\\"c\\\"; (char)s\"));\n+ assertEquals('c', exec(\"String s = 'c'; (char)s\"));\n \n try {\n assertEquals(\"cc\", exec(\"return (String)(char)\\\"cc\\\"\"));\n@@ -145,14 +162,14 @@ public void testStringAndCharacter() {\n }\n \n try {\n- assertEquals('c', exec(\"String s = \\\"cc\\\" (char)s\"));\n+ assertEquals('c', exec(\"String s = \\\"cc\\\"; (char)s\"));\n fail();\n } catch (final ClassCastException cce) {\n assertTrue(cce.getMessage().contains(\"Cannot cast [String] with length greater than one to [char].\"));\n }\n \n try {\n- assertEquals('c', exec(\"String s = 'cc' (char)s\"));\n+ assertEquals('c', exec(\"String s = 'cc'; (char)s\"));\n fail();\n } catch (final ClassCastException cce) {\n assertTrue(cce.getMessage().contains(\"Cannot cast [String] with length greater than one to [char].\"));\n@@ -163,8 +180,8 @@ public void testStringAndCharacter() {\n assertEquals(\"c\", exec(\"return (String)(Character)\\\"c\\\"\"));\n assertEquals(\"c\", exec(\"return (String)(Character)'c'\"));\n \n- assertEquals('c', exec(\"String s = \\\"c\\\" (Character)s\"));\n- assertEquals('c', exec(\"String s 
= 'c' (Character)s\"));\n+ assertEquals('c', exec(\"String s = \\\"c\\\"; (Character)s\"));\n+ assertEquals('c', exec(\"String s = 'c'; (Character)s\"));\n \n try {\n assertEquals(\"cc\", exec(\"return (String)(Character)\\\"cc\\\"\"));\n@@ -181,14 +198,14 @@ public void testStringAndCharacter() {\n }\n \n try {\n- assertEquals('c', exec(\"String s = \\\"cc\\\" (Character)s\"));\n+ assertEquals('c', exec(\"String s = \\\"cc\\\"; (Character)s\"));\n fail();\n } catch (final ClassCastException cce) {\n assertTrue(cce.getMessage().contains(\"Cannot cast [String] with length greater than one to [Character].\"));\n }\n \n try {\n- assertEquals('c', exec(\"String s = 'cc' (Character)s\"));\n+ assertEquals('c', exec(\"String s = 'cc'; (Character)s\"));\n fail();\n } catch (final ClassCastException cce) {\n assertTrue(cce.getMessage().contains(\"Cannot cast [String] with length greater than one to [Character].\"));", "filename": "modules/lang-painless/src/test/java/org/elasticsearch/painless/StringTests.java", "status": "modified" }, { "diff": "@@ -90,7 +90,7 @@ public void testInfiniteLoops() {\n \"The maximum number of statements that can be executed in a loop has been reached.\"));\n \n expected = expectThrows(PainlessError.class, () -> {\n- exec(\"while (true) {int y = 5}\");\n+ exec(\"while (true) {int y = 5;}\");\n });\n assertTrue(expected.getMessage().contains(\n \"The maximum number of statements that can be executed in a loop has been reached.\"));\n@@ -116,7 +116,7 @@ public void testInfiniteLoops() {\n \"The maximum number of statements that can be executed in a loop has been reached.\"));\n \n expected = expectThrows(PainlessError.class, () -> {\n- exec(\"for (;;) {int x = 5}\");\n+ exec(\"for (;;) {int x = 5;}\");\n fail(\"should have hit PainlessError\");\n });\n assertTrue(expected.getMessage().contains(\n@@ -130,7 +130,7 @@ public void testInfiniteLoops() {\n \"The maximum number of statements that can be executed in a loop has been reached.\"));\n \n RuntimeException parseException = expectThrows(RuntimeException.class, () -> {\n- exec(\"try { int x } catch (PainlessError error) {}\");\n+ exec(\"try { int x; } catch (PainlessError error) {}\");\n fail(\"should have hit ParseException\");\n });\n assertTrue(parseException.getMessage().contains(\"Not a type [PainlessError].\"));", "filename": "modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java", "status": "modified" } ] }
{ "body": "The `include` clause on the `terms` and `significant_terms` aggregations has a parse failure for string values provided for date and ip field types.\nSteps to reproduce:\n\n```\nPUT test\n{\n \"settings\": {\n \"number_of_replicas\": 0,\n \"number_of_shards\":1\n },\n \"mappings\": {\n \"test\": {\n\n \"properties\": {\n \"stringField\": {\n \"type\": \"string\"\n },\n \"dateTimeField\": {\n \"type\": \"date\",\n \"format\": \"date_time\"\n },\n \"ipField\": {\n \"type\": \"ip\"\n }\n }\n }\n }\n}\n\nPOST test/test\n{\"stringField\":\"bar\",\"ipField\":\"192.168.1.0\", \"dateTimeField\":\"2016-04-11T14:02:39.593Z\"}\n\nGET test/_search\n{\n \"size\":0,\n \"aggs\": {\n \"this_works\": {\n \"terms\": {\n \"field\": \"stringField\",\n \"include\":[\"bar\"] \n }\n }\n }\n}\nGET test/_search\n{\n \"size\":0,\n \"aggs\": {\n \"this_does_not_work\": {\n \"terms\": {\n \"field\": \"ipField\",\n \"include\":[\"192.168.1.0\"]\n\n }\n }\n }\n}\nGET test/_search\n{\n \"size\":0,\n \"aggs\": {\n \"this_does_not_work\": {\n \"terms\": {\n \"field\": \"dateTimeField\",\n \"include\":[\"2016-04-11T14:02:39.593Z\"]\n\n }\n }\n }\n}\n```\n\nThe error returned is of the following type\n\n```\n{\n \"error\": {\n \"root_cause\": [\n {\n \"type\": \"number_format_exception\",\n \"reason\": \"For input string: \\\"2016-04-11T14:02:39.593Z\\\"\"\n }\n ],\n \"type\": \"search_phase_execution_exception\",\n \"reason\": \"all shards failed\",\n \"phase\": \"query_fetch\",\n \"grouped\": true,\n \"failed_shards\": [\n {\n \"shard\": 0,\n \"index\": \"test\",\n \"node\": \"Qz_UtnGzTOeYEDLeOgTQLw\",\n \"reason\": {\n \"type\": \"number_format_exception\",\n \"reason\": \"For input string: \\\"2016-04-11T14:02:39.593Z\\\"\"\n }\n }\n ]\n },\n \"status\": 400\n}\n```\n", "comments": [ { "body": "As part of fixing this I've also discovered that the formatting of IP bucket keys for terms agg has changed in master. Previously we had a `key` value that was a long and a formatted 'key_as_string' - now we just have a single `key` value with an ugly encoding as can be seen in this screenshot:\n![sense - a json aware interface to elasticsearch](https://cloud.githubusercontent.com/assets/170925/14748517/0d379772-08b2-11e6-8f35-35af68740675.jpg)\nThis is a side-effect of switching over to byte representations for IPV6 I expect and consequently the shift to StringTerms for the buckets.\nI'm not sure how much of a problem the ugly `key` is is but I will add back a friendlier \"key_as_string\" addition for readable IP addresses.\n", "created_at": "2016-04-22T16:52:00Z" }, { "body": "@jpountz only ip address fields still return `key_as_string` but this seems to be missing for point fields.\n", "created_at": "2016-04-25T18:47:23Z" }, { "body": "@clintongormley @markharwood I opened #18003.\n", "created_at": "2016-04-27T08:31:36Z" }, { "body": "I started this work on formatting include/exclude agg criteria appropriately but it's in the same areas Adrien is currently fixing with formatting agg results appropriately in https://github.com/elastic/elasticsearch/pull/18003\n\nSo blocked on 18003 for now.\n", "created_at": "2016-04-27T15:18:04Z" } ], "number": 17705, "title": "Terms aggregations parse failure for date and ip fields" }
{ "body": "Added calls to DocValueFormat.parse methods to handle parsing of user-supplied `include` and `exclude` strings in `terms` and `significant_terms` aggregations into a form that can be compared with doc values\n\nCloses #17705\n", "number": 18408, "review_comments": [ { "body": "@jpountz rather than adding a specialized test for IP field types I test for regex && format!=DocValueFormat.RAW && (valuesSource instanceof ValuesSource.Bytes) \nIs this too broad?\n", "created_at": "2016-05-18T13:27:53Z" }, { "body": "This sounds right!\n", "created_at": "2016-05-18T14:29:42Z" } ], "title": "Aggregations fix: support include/exclude strings for IP and dates" }
{ "commits": [ { "message": "Aggregations fix: support include/exclude strings formatted for IP and date fields in terms and significant_terms aggregations.\n\nCloses #17705" }, { "message": "Added test for illegal regex-style include clauses on IP fields" } ], "files": [ { "diff": "@@ -211,7 +211,14 @@ protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator pare\n }\n }\n assert execution != null;\n- return execution.create(name, factories, valuesSource, config.format(), bucketCountThresholds, includeExclude, context, parent,\n+ \n+ DocValueFormat format = config.format();\n+ if ((includeExclude != null) && (includeExclude.isRegexBased()) && format != DocValueFormat.RAW) {\n+ throw new AggregationExecutionException(\"Aggregation [\" + name + \"] cannot support regular expression style include/exclude \"\n+ + \"settings as they can only be applied to string fields. Use an array of values for include/exclude clauses\");\n+ }\n+ \n+ return execution.create(name, factories, valuesSource, format, bucketCountThresholds, includeExclude, context, parent,\n significanceHeuristic, this, pipelineAggregators, metaData);\n }\n \n@@ -227,7 +234,7 @@ protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator pare\n }\n IncludeExclude.LongFilter longFilter = null;\n if (includeExclude != null) {\n- longFilter = includeExclude.convertToLongFilter();\n+ longFilter = includeExclude.convertToLongFilter(config.format());\n }\n return new SignificantLongTermsAggregator(name, factories, (ValuesSource.Numeric) valuesSource, config.format(),\n bucketCountThresholds, context, parent, significanceHeuristic, this, longFilter, pipelineAggregators,\n@@ -248,7 +255,7 @@ Aggregator create(String name, AggregatorFactories factories, ValuesSource value\n AggregationContext aggregationContext, Aggregator parent, SignificanceHeuristic significanceHeuristic,\n SignificantTermsAggregatorFactory termsAggregatorFactory, List<PipelineAggregator> pipelineAggregators,\n Map<String, Object> metaData) throws IOException {\n- final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter();\n+ final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter(format);\n return new SignificantStringTermsAggregator(name, factories, valuesSource, format, bucketCountThresholds, filter,\n aggregationContext, parent, significanceHeuristic, termsAggregatorFactory, pipelineAggregators, metaData);\n }\n@@ -262,7 +269,7 @@ Aggregator create(String name, AggregatorFactories factories, ValuesSource value\n AggregationContext aggregationContext, Aggregator parent, SignificanceHeuristic significanceHeuristic,\n SignificantTermsAggregatorFactory termsAggregatorFactory, List<PipelineAggregator> pipelineAggregators,\n Map<String, Object> metaData) throws IOException {\n- final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();\n+ final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? 
null : includeExclude.convertToOrdinalsFilter(format);\n return new GlobalOrdinalsSignificantTermsAggregator(name, factories,\n (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, format, bucketCountThresholds, filter,\n aggregationContext, parent, significanceHeuristic, termsAggregatorFactory, pipelineAggregators, metaData);\n@@ -277,7 +284,7 @@ Aggregator create(String name, AggregatorFactories factories, ValuesSource value\n AggregationContext aggregationContext, Aggregator parent, SignificanceHeuristic significanceHeuristic,\n SignificantTermsAggregatorFactory termsAggregatorFactory, List<PipelineAggregator> pipelineAggregators,\n Map<String, Object> metaData) throws IOException {\n- final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();\n+ final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter(format);\n return new GlobalOrdinalsSignificantTermsAggregator.WithHash(name, factories,\n (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, format, bucketCountThresholds, filter,\n aggregationContext, parent, significanceHeuristic, termsAggregatorFactory, pipelineAggregators, metaData);", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java", "status": "modified" }, { "diff": "@@ -150,8 +150,13 @@ protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator pare\n }\n }\n }\n+ DocValueFormat format = config.format();\n+ if ((includeExclude != null) && (includeExclude.isRegexBased()) && format != DocValueFormat.RAW) {\n+ throw new AggregationExecutionException(\"Aggregation [\" + name + \"] cannot support regular expression style include/exclude \"\n+ + \"settings as they can only be applied to string fields. Use an array of values for include/exclude clauses\");\n+ }\n \n- return execution.create(name, factories, valuesSource, order, config.format(), bucketCountThresholds, includeExclude, context, parent,\n+ return execution.create(name, factories, valuesSource, order, format, bucketCountThresholds, includeExclude, context, parent, \n collectMode, showTermDocCountError, pipelineAggregators, metaData);\n }\n \n@@ -171,7 +176,7 @@ protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator pare\n pipelineAggregators, metaData);\n }\n if (includeExclude != null) {\n- longFilter = includeExclude.convertToLongFilter();\n+ longFilter = includeExclude.convertToLongFilter(config.format());\n }\n return new LongTermsAggregator(name, factories, (ValuesSource.Numeric) valuesSource, config.format(), order,\n bucketCountThresholds, context, parent, collectMode, showTermDocCountError, longFilter, pipelineAggregators,\n@@ -192,7 +197,7 @@ Aggregator create(String name, AggregatorFactories factories, ValuesSource value\n AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode,\n boolean showTermDocCountError, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)\n throws IOException {\n- final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter();\n+ final IncludeExclude.StringFilter filter = includeExclude == null ? 
null : includeExclude.convertToStringFilter(format);\n return new StringTermsAggregator(name, factories, valuesSource, order, format, bucketCountThresholds, filter,\n aggregationContext, parent, subAggCollectMode, showTermDocCountError, pipelineAggregators, metaData);\n }\n@@ -211,7 +216,7 @@ Aggregator create(String name, AggregatorFactories factories, ValuesSource value\n AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode,\n boolean showTermDocCountError, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)\n throws IOException {\n- final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();\n+ final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter(format);\n return new GlobalOrdinalsStringTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals) valuesSource, order,\n format, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError,\n pipelineAggregators, metaData);\n@@ -231,7 +236,7 @@ Aggregator create(String name, AggregatorFactories factories, ValuesSource value\n AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode,\n boolean showTermDocCountError, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)\n throws IOException {\n- final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();\n+ final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter(format);\n return new GlobalOrdinalsStringTermsAggregator.WithHash(name, factories, (ValuesSource.Bytes.WithOrdinals) valuesSource,\n order, format, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError,\n pipelineAggregators, metaData);", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java", "status": "modified" }, { "diff": "@@ -43,6 +43,7 @@\n import org.elasticsearch.common.xcontent.ToXContent;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentParser;\n+import org.elasticsearch.search.DocValueFormat;\n import org.elasticsearch.search.aggregations.support.ValuesSource;\n import org.elasticsearch.search.aggregations.support.ValuesSource.Bytes.WithOrdinals;\n \n@@ -135,7 +136,8 @@ public boolean accept(BytesRef value) {\n }\n \n public static abstract class OrdinalsFilter {\n- public abstract LongBitSet acceptedGlobalOrdinals(RandomAccessOrds globalOrdinals, ValuesSource.Bytes.WithOrdinals valueSource) throws IOException;\n+ public abstract LongBitSet acceptedGlobalOrdinals(RandomAccessOrds globalOrdinals, ValuesSource.Bytes.WithOrdinals valueSource)\n+ throws IOException;\n \n }\n \n@@ -152,7 +154,8 @@ private AutomatonBackedOrdinalsFilter(Automaton automaton) {\n *\n */\n @Override\n- public LongBitSet acceptedGlobalOrdinals(RandomAccessOrds globalOrdinals, ValuesSource.Bytes.WithOrdinals valueSource) throws IOException {\n+ public LongBitSet acceptedGlobalOrdinals(RandomAccessOrds globalOrdinals, ValuesSource.Bytes.WithOrdinals valueSource)\n+ throws IOException {\n LongBitSet acceptedGlobalOrdinals = new LongBitSet(globalOrdinals.getValueCount());\n TermsEnum globalTermsEnum;\n Terms globalTerms = new DocValuesTerms(globalOrdinals);\n@@ -179,7 +182,7 @@ 
public TermListBackedOrdinalsFilter(SortedSet<BytesRef> includeValues, SortedSet\n @Override\n public LongBitSet acceptedGlobalOrdinals(RandomAccessOrds globalOrdinals, WithOrdinals valueSource) throws IOException {\n LongBitSet acceptedGlobalOrdinals = new LongBitSet(globalOrdinals.getValueCount());\n- if(includeValues!=null){\n+ if (includeValues != null) {\n for (BytesRef term : includeValues) {\n long ord = globalOrdinals.lookupTerm(term);\n if (ord >= 0) {\n@@ -534,33 +537,46 @@ private Automaton toAutomaton() {\n return a;\n }\n \n- public StringFilter convertToStringFilter() {\n+ public StringFilter convertToStringFilter(DocValueFormat format) {\n if (isRegexBased()) {\n return new AutomatonBackedStringFilter(toAutomaton());\n }\n- return new TermListBackedStringFilter(includeValues, excludeValues);\n+ return new TermListBackedStringFilter(parseForDocValues(includeValues, format), parseForDocValues(excludeValues, format));\n }\n \n- public OrdinalsFilter convertToOrdinalsFilter() {\n+ private static SortedSet<BytesRef> parseForDocValues(SortedSet<BytesRef> endUserFormattedValues, DocValueFormat format) {\n+ SortedSet<BytesRef> result = endUserFormattedValues;\n+ if (endUserFormattedValues != null) {\n+ if (format != DocValueFormat.RAW) {\n+ result = new TreeSet<>();\n+ for (BytesRef formattedVal : endUserFormattedValues) {\n+ result.add(format.parseBytesRef(formattedVal.utf8ToString()));\n+ }\n+ }\n+ }\n+ return result;\n+ }\n+\n+ public OrdinalsFilter convertToOrdinalsFilter(DocValueFormat format) {\n \n if (isRegexBased()) {\n return new AutomatonBackedOrdinalsFilter(toAutomaton());\n }\n- return new TermListBackedOrdinalsFilter(includeValues, excludeValues);\n+ return new TermListBackedOrdinalsFilter(parseForDocValues(includeValues, format), parseForDocValues(excludeValues, format));\n }\n \n- public LongFilter convertToLongFilter() {\n+ public LongFilter convertToLongFilter(DocValueFormat format) {\n int numValids = includeValues == null ? 0 : includeValues.size();\n int numInvalids = excludeValues == null ? 
0 : excludeValues.size();\n LongFilter result = new LongFilter(numValids, numInvalids);\n if (includeValues != null) {\n for (BytesRef val : includeValues) {\n- result.addAccept(Long.parseLong(val.utf8ToString()));\n+ result.addAccept(format.parseLong(val.utf8ToString(), false, null));\n }\n }\n if (excludeValues != null) {\n for (BytesRef val : excludeValues) {\n- result.addReject(Long.parseLong(val.utf8ToString()));\n+ result.addReject(format.parseLong(val.utf8ToString(), false, null));\n }\n }\n return result;\n@@ -572,13 +588,13 @@ public LongFilter convertToDoubleFilter() {\n LongFilter result = new LongFilter(numValids, numInvalids);\n if (includeValues != null) {\n for (BytesRef val : includeValues) {\n- double dval=Double.parseDouble(val.utf8ToString());\n+ double dval = Double.parseDouble(val.utf8ToString());\n result.addAccept(NumericUtils.doubleToSortableLong(dval));\n }\n }\n if (excludeValues != null) {\n for (BytesRef val : excludeValues) {\n- double dval=Double.parseDouble(val.utf8ToString());\n+ double dval = Double.parseDouble(val.utf8ToString());\n result.addReject(NumericUtils.doubleToSortableLong(dval));\n }\n }", "filename": "core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java", "status": "modified" }, { "diff": "@@ -117,6 +117,33 @@ setup:\n \n - match: { aggregations.ip_terms.buckets.1.doc_count: 1 }\n \n+ - do:\n+ search:\n+ body: { \"size\" : 0, \"aggs\" : { \"ip_terms\" : { \"terms\" : { \"field\" : \"ip\", \"include\" : [ \"127.0.0.1\" ] } } } }\n+\n+ - match: { hits.total: 3 }\n+\n+ - length: { aggregations.ip_terms.buckets: 1 }\n+\n+ - match: { aggregations.ip_terms.buckets.0.key: \"127.0.0.1\" }\n+\n+ - do:\n+ search:\n+ body: { \"size\" : 0, \"aggs\" : { \"ip_terms\" : { \"terms\" : { \"field\" : \"ip\", \"exclude\" : [ \"127.0.0.1\" ] } } } }\n+\n+ - match: { hits.total: 3 }\n+\n+ - length: { aggregations.ip_terms.buckets: 1 }\n+\n+ - match: { aggregations.ip_terms.buckets.0.key: \"::1\" }\n+\n+ - do:\n+ catch: request\n+ search:\n+ body: { \"size\" : 0, \"aggs\" : { \"ip_terms\" : { \"terms\" : { \"field\" : \"ip\", \"exclude\" : \"127.*\" } } } }\n+\n+ \n+\n ---\n \"Boolean test\":\n - do:\n@@ -300,4 +327,27 @@ setup:\n - match: { aggregations.date_terms.buckets.1.key_as_string: \"2014-09-01T00:00:00.000Z\" }\n \n - match: { aggregations.date_terms.buckets.1.doc_count: 1 }\n+ \n+ - do:\n+ search:\n+ body: { \"size\" : 0, \"aggs\" : { \"date_terms\" : { \"terms\" : { \"field\" : \"date\", \"include\" : [ \"2016-05-03\" ] } } } }\n+\n+ - match: { hits.total: 3 }\n+\n+ - length: { aggregations.date_terms.buckets: 1 }\n+ \n+ - match: { aggregations.date_terms.buckets.0.key_as_string: \"2016-05-03T00:00:00.000Z\" }\n+ \n+ - match: { aggregations.date_terms.buckets.0.doc_count: 2 } \n+ \n+ - do:\n+ search:\n+ body: { \"size\" : 0, \"aggs\" : { \"date_terms\" : { \"terms\" : { \"field\" : \"date\", \"exclude\" : [ \"2016-05-03\" ] } } } }\n+\n+ - match: { hits.total: 3 }\n+\n+ - length: { aggregations.date_terms.buckets: 1 }\n+ \n+ - match: { aggregations.date_terms.buckets.0.key_as_string: \"2014-09-01T00:00:00.000Z\" }\n \n+ - match: { aggregations.date_terms.buckets.0.doc_count: 1 } ", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml", "status": "modified" }, { "diff": "@@ -121,3 +121,28 @@\n - is_false: aggregations.ip_terms.buckets.0.key_as_string\n \n - match: { aggregations.ip_terms.buckets.0.doc_count: 1 }\n+\n+ - do:\n+ search:\n+ body: { \"query\" : { 
\"exists\" : { \"field\" : \"ip\" } }, \"aggs\" : { \"ip_terms\" : { \"significant_terms\" : { \"field\" : \"ip\", \"min_doc_count\" : 1, \"include\" : [ \"::1\" ] } } } }\n+\n+ - match: { hits.total: 1 }\n+\n+ - length: { aggregations.ip_terms.buckets: 1 }\n+\n+ - match: { aggregations.ip_terms.buckets.0.key: \"::1\" }\n+ \n+ - do:\n+ search:\n+ body: { \"query\" : { \"exists\" : { \"field\" : \"ip\" } }, \"aggs\" : { \"ip_terms\" : { \"significant_terms\" : { \"field\" : \"ip\", \"min_doc_count\" : 1, \"exclude\" : [ \"::1\" ] } } } }\n+\n+ - match: { hits.total: 1 }\n+\n+ - length: { aggregations.ip_terms.buckets: 0 }\n+ \n+ - do:\n+ catch: request\n+ search:\n+ body: { \"size\" : 0, \"aggs\" : { \"ip_terms\" : { \"significant_terms\" : { \"field\" : \"ip\", \"exclude\" : \"127.*\" } } } }\n+ \n+ ", "filename": "rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yaml", "status": "modified" } ] }
{ "body": "Last week I got a problem that elastic search didn't support back quote and I found out the reason. It seems that elastic missed 'modifier symbol' in symbol matcher. I found code in CharMatcher.java:\n`SYMBOL {`\n`@Override`\n `public boolean isTokenChar(int c) {`\n `switch (Character.getType(c)) {`\n `case Character.CURRENCY_SYMBOL:`\n `case Character.MATH_SYMBOL:`\n `case Character.OTHER_SYMBOL:`\n `return true;`\n `default:`\n `return false;`\n `}`\n `}`\n `}`\nAppearently Character.MODIFIER_SYMBOL is missing here. I think it should be a bug.\n", "comments": [ { "body": "@jpountz could you look at this please?\n", "created_at": "2016-05-17T09:37:55Z" } ], "number": 18388, "title": "Elastic search missing modifier symbol in symbol matcher." }
{ "body": "Closes #18388\n", "number": 18402, "review_comments": [ { "body": "Maybe include back quote like the original issue?\n", "created_at": "2016-05-17T15:26:18Z" } ], "title": "Add `Character.MODIFIER_SYMBOL` to the list of symbol categories." }
{ "commits": [ { "message": "Add `Character.MODIFIER_SYMBOL` to the list of symbol categories. #18402\n\nCloses #18388" } ], "files": [ { "diff": "@@ -88,6 +88,7 @@ public boolean isTokenChar(int c) {\n case Character.CURRENCY_SYMBOL:\n case Character.MATH_SYMBOL:\n case Character.OTHER_SYMBOL:\n+ case Character.MODIFIER_SYMBOL:\n return true;\n default:\n return false;", "filename": "core/src/main/java/org/elasticsearch/index/analysis/CharMatcher.java", "status": "modified" }, { "diff": "@@ -0,0 +1,71 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.index.analysis;\n+\n+import org.elasticsearch.test.ESTestCase;\n+\n+public class CharMatcherTests extends ESTestCase {\n+\n+ public void testLetter() {\n+ assertTrue(CharMatcher.Basic.LETTER.isTokenChar('a')); // category Ll\n+ assertTrue(CharMatcher.Basic.LETTER.isTokenChar('é')); // category Ll\n+ assertTrue(CharMatcher.Basic.LETTER.isTokenChar('A')); // category Lu\n+ assertTrue(CharMatcher.Basic.LETTER.isTokenChar('Å')); // category Lu\n+ assertTrue(CharMatcher.Basic.LETTER.isTokenChar('ʰ')); // category Lm\n+ assertTrue(CharMatcher.Basic.LETTER.isTokenChar('ª')); // category Lo\n+ assertTrue(CharMatcher.Basic.LETTER.isTokenChar('Dž')); // category Lt\n+ assertFalse(CharMatcher.Basic.LETTER.isTokenChar(' '));\n+ assertFalse(CharMatcher.Basic.LETTER.isTokenChar('0'));\n+ assertFalse(CharMatcher.Basic.LETTER.isTokenChar('!'));\n+ }\n+\n+ public void testSpace() {\n+ assertTrue(CharMatcher.Basic.WHITESPACE.isTokenChar(' '));\n+ assertTrue(CharMatcher.Basic.WHITESPACE.isTokenChar('\\t'));\n+ assertFalse(CharMatcher.Basic.WHITESPACE.isTokenChar('\\u00A0')); // nbsp\n+ }\n+\n+ public void testNumber() {\n+ assertTrue(CharMatcher.Basic.DIGIT.isTokenChar('1'));\n+ assertTrue(CharMatcher.Basic.DIGIT.isTokenChar('١')); // ARABIC-INDIC DIGIT ONE\n+ assertFalse(CharMatcher.Basic.DIGIT.isTokenChar(','));\n+ assertFalse(CharMatcher.Basic.DIGIT.isTokenChar('a'));\n+ }\n+\n+ public void testSymbol() {\n+ assertTrue(CharMatcher.Basic.SYMBOL.isTokenChar('$')); // category Sc\n+ assertTrue(CharMatcher.Basic.SYMBOL.isTokenChar('+')); // category Sm\n+ assertTrue(CharMatcher.Basic.SYMBOL.isTokenChar('`')); // category Sm\n+ assertTrue(CharMatcher.Basic.SYMBOL.isTokenChar('^')); // category Sk\n+ assertTrue(CharMatcher.Basic.SYMBOL.isTokenChar('¦')); // category Sc\n+ assertFalse(CharMatcher.Basic.SYMBOL.isTokenChar(' '));\n+ }\n+\n+ public void testPunctuation() {\n+ assertTrue(CharMatcher.Basic.PUNCTUATION.isTokenChar('(')); // category Ps\n+ assertTrue(CharMatcher.Basic.PUNCTUATION.isTokenChar(')')); // category Pe\n+ assertTrue(CharMatcher.Basic.PUNCTUATION.isTokenChar('_')); // category Pc\n+ assertTrue(CharMatcher.Basic.PUNCTUATION.isTokenChar('!')); // 
category Po\n+ assertTrue(CharMatcher.Basic.PUNCTUATION.isTokenChar('-')); // category Pd\n+ assertTrue(CharMatcher.Basic.PUNCTUATION.isTokenChar('«')); // category Pi\n+ assertTrue(CharMatcher.Basic.PUNCTUATION.isTokenChar('»')); // category Pf\n+ assertFalse(CharMatcher.Basic.PUNCTUATION.isTokenChar(' '));\n+ }\n+}", "filename": "core/src/test/java/org/elasticsearch/index/analysis/CharMatcherTests.java", "status": "added" } ] }
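A quick standalone demo of the behaviour fixed in this last record: the back quote (U+0060) is classified as a modifier symbol, so the original switch over Character.getType, which only lists the currency, math and other symbol categories, never matches it. The class name is made up for the example; the switch is the pre-fix matcher logic inlined from the snippet in the issue.

```java
public class ModifierSymbolDemo {
    public static void main(String[] args) {
        int backQuote = '`';

        // The back quote belongs to Unicode category Sk (modifier symbol).
        System.out.println(Character.getType(backQuote) == Character.MODIFIER_SYMBOL); // true

        // Pre-fix matcher logic: without a MODIFIER_SYMBOL case the back quote
        // falls through to the default branch and is not treated as a symbol.
        boolean matchedBeforeFix;
        switch (Character.getType(backQuote)) {
            case Character.CURRENCY_SYMBOL:
            case Character.MATH_SYMBOL:
            case Character.OTHER_SYMBOL:
                matchedBeforeFix = true;
                break;
            default:
                matchedBeforeFix = false;
        }
        System.out.println(matchedBeforeFix); // false, the behaviour reported in #18388
    }
}
```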